2024-11-23 15:24:12,317 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9 2024-11-23 15:24:12,329 main DEBUG Took 0.009985 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-11-23 15:24:12,329 main DEBUG PluginManager 'Core' found 129 plugins 2024-11-23 15:24:12,329 main DEBUG PluginManager 'Level' found 0 plugins 2024-11-23 15:24:12,330 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-11-23 15:24:12,331 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-23 15:24:12,338 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-11-23 15:24:12,350 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-23 15:24:12,351 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-23 15:24:12,352 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-23 15:24:12,352 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-23 15:24:12,352 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-23 15:24:12,353 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-23 15:24:12,353 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-23 15:24:12,354 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-23 15:24:12,354 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-23 15:24:12,354 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-23 15:24:12,355 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-23 15:24:12,355 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-23 15:24:12,356 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-23 15:24:12,356 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-11-23 15:24:12,356 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-23 15:24:12,357 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-23 15:24:12,357 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-23 15:24:12,357 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-23 15:24:12,358 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-23 15:24:12,358 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-23 15:24:12,358 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-23 15:24:12,359 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-23 15:24:12,359 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-23 15:24:12,359 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-23 15:24:12,360 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-23 15:24:12,360 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-11-23 15:24:12,361 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-23 15:24:12,362 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-11-23 15:24:12,364 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-11-23 15:24:12,365 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
2024-11-23 15:24:12,366 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-11-23 15:24:12,366 main DEBUG PluginManager 'Converter' found 47 plugins 2024-11-23 15:24:12,374 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-11-23 15:24:12,376 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-11-23 15:24:12,378 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-11-23 15:24:12,378 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-11-23 15:24:12,378 main DEBUG createAppenders(={Console}) 2024-11-23 15:24:12,379 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9 initialized 2024-11-23 15:24:12,379 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9 2024-11-23 15:24:12,380 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9 OK. 2024-11-23 15:24:12,380 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-11-23 15:24:12,380 main DEBUG OutputStream closed 2024-11-23 15:24:12,381 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-11-23 15:24:12,381 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-11-23 15:24:12,381 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@54e1c68b OK 2024-11-23 15:24:12,448 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-11-23 15:24:12,450 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-11-23 15:24:12,451 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-11-23 15:24:12,452 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-11-23 15:24:12,452 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-11-23 15:24:12,452 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-11-23 15:24:12,453 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-11-23 15:24:12,453 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-11-23 15:24:12,453 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-11-23 15:24:12,453 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-11-23 15:24:12,454 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-11-23 15:24:12,454 main DEBUG Registering 
MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-11-23 15:24:12,454 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-11-23 15:24:12,455 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-11-23 15:24:12,455 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-11-23 15:24:12,455 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-11-23 15:24:12,455 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-11-23 15:24:12,456 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-11-23 15:24:12,459 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-11-23 15:24:12,459 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-logging/target/hbase-logging-2.7.0-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@7dda48d9) with optional ClassLoader: null 2024-11-23 15:24:12,459 main DEBUG Shutdown hook enabled. Registering a new one. 2024-11-23 15:24:12,460 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@7dda48d9] started OK. 2024-11-23T15:24:12,752 DEBUG [main {}] hbase.HBaseTestingUtility(348): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/45616010-fcc1-6076-40c6-648ed4f2daa4 2024-11-23 15:24:12,755 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-11-23 15:24:12,755 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 
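The block above is Log4j2 loading the log4j2.properties bundled in the test jar: a Console appender writing to SYSTEM_ERR with the pattern %d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n, the root logger at "INFO,Console", and per-logger overrides such as org.apache.zookeeper=ERROR, org.apache.hadoop=WARN and org.apache.hadoop.hbase=DEBUG. As a rough illustration only (the test run relies on the bundled properties file, not on code), the same levels could be applied programmatically with Log4j2's Configurator; the class name below is invented for the sketch and log4j-core must be on the classpath.

```java
import org.apache.logging.log4j.Level;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.core.config.Configurator;

public class LogLevelSketch {
  public static void main(String[] args) {
    // Root logger at INFO, matching the "INFO,Console" root entry in the dumped configuration.
    Configurator.setRootLevel(Level.INFO);
    // A few of the per-logger overrides the PropertiesConfiguration above established.
    Configurator.setLevel("org.apache.zookeeper", Level.ERROR);
    Configurator.setLevel("org.apache.hadoop", Level.WARN);
    Configurator.setLevel("org.apache.hadoop.hbase", Level.DEBUG);

    Logger log = LogManager.getLogger(LogLevelSketch.class);
    log.info("log levels applied");
  }
}
```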
2024-11-23T15:24:12,765 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.TestAcidGuaranteesWithAdaptivePolicy timeout: 13 mins 2024-11-23T15:24:12,784 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1126): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=1, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-23T15:24:12,787 INFO [Time-limited test {}] hbase.HBaseZKTestingUtility(82): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/45616010-fcc1-6076-40c6-648ed4f2daa4/cluster_ab4327e8-be6a-a24d-51a5-85c3c7bb54b1, deleteOnExit=true 2024-11-23T15:24:12,787 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1140): STARTING DFS 2024-11-23T15:24:12,788 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/45616010-fcc1-6076-40c6-648ed4f2daa4/test.cache.data in system properties and HBase conf 2024-11-23T15:24:12,789 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/45616010-fcc1-6076-40c6-648ed4f2daa4/hadoop.tmp.dir in system properties and HBase conf 2024-11-23T15:24:12,789 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/45616010-fcc1-6076-40c6-648ed4f2daa4/hadoop.log.dir in system properties and HBase conf 2024-11-23T15:24:12,790 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/45616010-fcc1-6076-40c6-648ed4f2daa4/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-23T15:24:12,790 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/45616010-fcc1-6076-40c6-648ed4f2daa4/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-23T15:24:12,791 INFO [Time-limited test {}] hbase.HBaseTestingUtility(811): read short circuit is OFF 2024-11-23T15:24:12,907 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable 2024-11-23T15:24:12,995 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-23T15:24:12,998 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/45616010-fcc1-6076-40c6-648ed4f2daa4/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-23T15:24:12,999 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/45616010-fcc1-6076-40c6-648ed4f2daa4/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-23T15:24:12,999 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/45616010-fcc1-6076-40c6-648ed4f2daa4/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-23T15:24:13,000 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/45616010-fcc1-6076-40c6-648ed4f2daa4/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-23T15:24:13,000 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/45616010-fcc1-6076-40c6-648ed4f2daa4/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-23T15:24:13,001 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/45616010-fcc1-6076-40c6-648ed4f2daa4/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-23T15:24:13,001 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/45616010-fcc1-6076-40c6-648ed4f2daa4/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-23T15:24:13,001 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/45616010-fcc1-6076-40c6-648ed4f2daa4/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-23T15:24:13,002 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/45616010-fcc1-6076-40c6-648ed4f2daa4/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-23T15:24:13,002 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/45616010-fcc1-6076-40c6-648ed4f2daa4/nfs.dump.dir in system properties and HBase conf 2024-11-23T15:24:13,002 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/45616010-fcc1-6076-40c6-648ed4f2daa4/java.io.tmpdir in system properties and HBase conf 2024-11-23T15:24:13,003 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/45616010-fcc1-6076-40c6-648ed4f2daa4/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-23T15:24:13,003 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/45616010-fcc1-6076-40c6-648ed4f2daa4/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-23T15:24:13,003 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/45616010-fcc1-6076-40c6-648ed4f2daa4/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-23T15:24:13,826 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-11-23T15:24:13,903 INFO [Time-limited test {}] log.Log(170): Logging initialized @2265ms to org.eclipse.jetty.util.log.Slf4jLog 2024-11-23T15:24:13,985 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-23T15:24:14,047 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-23T15:24:14,067 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-23T15:24:14,067 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-23T15:24:14,068 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-23T15:24:14,081 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-23T15:24:14,083 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@8167a4c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/45616010-fcc1-6076-40c6-648ed4f2daa4/hadoop.log.dir/,AVAILABLE} 2024-11-23T15:24:14,084 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@62a9beb2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-23T15:24:14,273 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@b03fcff{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/45616010-fcc1-6076-40c6-648ed4f2daa4/java.io.tmpdir/jetty-localhost-34183-hadoop-hdfs-3_4_1-tests_jar-_-any-3824857979668518562/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-23T15:24:14,280 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@e0a3ea0{HTTP/1.1, (http/1.1)}{localhost:34183} 2024-11-23T15:24:14,280 INFO [Time-limited test {}] server.Server(415): Started @2643ms 2024-11-23T15:24:14,669 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-23T15:24:14,675 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-23T15:24:14,676 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-23T15:24:14,677 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-23T15:24:14,677 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-23T15:24:14,678 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@47db50b9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/45616010-fcc1-6076-40c6-648ed4f2daa4/hadoop.log.dir/,AVAILABLE} 2024-11-23T15:24:14,678 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4727fac8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-23T15:24:14,799 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1f79ec76{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/45616010-fcc1-6076-40c6-648ed4f2daa4/java.io.tmpdir/jetty-localhost-39335-hadoop-hdfs-3_4_1-tests_jar-_-any-17974736527655669816/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-23T15:24:14,799 INFO [Time-limited 
test {}] server.AbstractConnector(333): Started ServerConnector@576ebda6{HTTP/1.1, (http/1.1)}{localhost:39335} 2024-11-23T15:24:14,800 INFO [Time-limited test {}] server.Server(415): Started @3163ms 2024-11-23T15:24:14,856 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-23T15:24:15,304 WARN [Thread-72 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/45616010-fcc1-6076-40c6-648ed4f2daa4/cluster_ab4327e8-be6a-a24d-51a5-85c3c7bb54b1/dfs/data/data1/current/BP-1918544696-172.17.0.2-1732375453586/current, will proceed with Du for space computation calculation, 2024-11-23T15:24:15,304 WARN [Thread-73 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/45616010-fcc1-6076-40c6-648ed4f2daa4/cluster_ab4327e8-be6a-a24d-51a5-85c3c7bb54b1/dfs/data/data2/current/BP-1918544696-172.17.0.2-1732375453586/current, will proceed with Du for space computation calculation, 2024-11-23T15:24:15,345 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-23T15:24:15,401 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x2026f6209236590 with lease ID 0x843cb0c538d7b2a3: Processing first storage report for DS-5edd23db-b24f-4168-969f-4334cca64a02 from datanode DatanodeRegistration(127.0.0.1:33485, datanodeUuid=fa81f3d8-5d4c-4f82-8f34-6d25fc680d96, infoPort=43687, infoSecurePort=0, ipcPort=46657, storageInfo=lv=-57;cid=testClusterID;nsid=1519534250;c=1732375453587) 2024-11-23T15:24:15,402 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x2026f6209236590 with lease ID 0x843cb0c538d7b2a3: from storage DS-5edd23db-b24f-4168-969f-4334cca64a02 node DatanodeRegistration(127.0.0.1:33485, datanodeUuid=fa81f3d8-5d4c-4f82-8f34-6d25fc680d96, infoPort=43687, infoSecurePort=0, ipcPort=46657, storageInfo=lv=-57;cid=testClusterID;nsid=1519534250;c=1732375453587), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-23T15:24:15,402 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x2026f6209236590 with lease ID 0x843cb0c538d7b2a3: Processing first storage report for DS-12ee3e9e-85ea-4da2-b7fc-32395bc2749f from datanode DatanodeRegistration(127.0.0.1:33485, datanodeUuid=fa81f3d8-5d4c-4f82-8f34-6d25fc680d96, infoPort=43687, infoSecurePort=0, ipcPort=46657, storageInfo=lv=-57;cid=testClusterID;nsid=1519534250;c=1732375453587) 2024-11-23T15:24:15,402 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x2026f6209236590 with lease ID 0x843cb0c538d7b2a3: from storage DS-12ee3e9e-85ea-4da2-b7fc-32395bc2749f node DatanodeRegistration(127.0.0.1:33485, datanodeUuid=fa81f3d8-5d4c-4f82-8f34-6d25fc680d96, infoPort=43687, infoSecurePort=0, ipcPort=46657, storageInfo=lv=-57;cid=testClusterID;nsid=1519534250;c=1732375453587), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-23T15:24:15,405 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(703): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/45616010-fcc1-6076-40c6-648ed4f2daa4 
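Everything from the HBaseClassTestRule line onward is HBaseTestingUtility bringing up a single-node mini cluster for TestAcidGuaranteesWithAdaptivePolicy: one master, one region server, one DataNode and one ZooKeeper server under the test-data directory. A minimal sketch of a test that starts the same topology follows, assuming the HBase 2.x test APIs named in the log (HBaseTestingUtility, StartMiniClusterOption, HBaseClassTestRule) and JUnit 4; the test class name is invented for the example.

```java
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.StartMiniClusterOption;
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.BeforeClass;
import org.junit.ClassRule;
import org.junit.Test;

public class MiniClusterStartupSketchTest {

  // Supplies the per-class timeout that HBaseClassTestRule(94) reports above (13 mins in this run).
  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
      HBaseClassTestRule.forClass(MiniClusterStartupSketchTest.class);

  private static final HBaseTestingUtility UTIL = new HBaseTestingUtility();

  @BeforeClass
  public static void setUp() throws Exception {
    // Mirrors the StartMiniClusterOption printed above: one master, one region server,
    // one data node and one ZooKeeper server, all rooted under the test-data directory.
    StartMiniClusterOption option = StartMiniClusterOption.builder()
        .numMasters(1)
        .numRegionServers(1)
        .numDataNodes(1)
        .numZkServers(1)
        .build();
    UTIL.startMiniCluster(option);
  }

  @AfterClass
  public static void tearDown() throws Exception {
    UTIL.shutdownMiniCluster();
  }

  @Test
  public void clusterIsUp() throws Exception {
    // Once the master and region server have registered, the Admin API is reachable.
    Assert.assertNotNull(UTIL.getAdmin().getClusterMetrics());
  }
}
```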
2024-11-23T15:24:15,480 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(259): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/45616010-fcc1-6076-40c6-648ed4f2daa4/cluster_ab4327e8-be6a-a24d-51a5-85c3c7bb54b1/zookeeper_0, clientPort=62881, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/45616010-fcc1-6076-40c6-648ed4f2daa4/cluster_ab4327e8-be6a-a24d-51a5-85c3c7bb54b1/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/45616010-fcc1-6076-40c6-648ed4f2daa4/cluster_ab4327e8-be6a-a24d-51a5-85c3c7bb54b1/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-23T15:24:15,490 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(284): Started MiniZooKeeperCluster and ran 'stat' on client port=62881 2024-11-23T15:24:15,500 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-23T15:24:15,502 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-23T15:24:15,734 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073741825_1001 (size=7) 2024-11-23T15:24:16,138 INFO [Time-limited test {}] util.FSUtils(490): Created version file at hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704 with version=8 2024-11-23T15:24:16,139 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1460): Setting hbase.fs.tmp.dir to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/hbase-staging 2024-11-23T15:24:16,267 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-11-23T15:24:16,535 INFO [Time-limited test {}] client.ConnectionUtils(129): master/6a36843bf905:0 server-side Connection retries=45 2024-11-23T15:24:16,555 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-23T15:24:16,555 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-23T15:24:16,556 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-23T15:24:16,556 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-23T15:24:16,556 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-23T15:24:16,690 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating 
org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-23T15:24:16,755 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-11-23T15:24:16,764 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-11-23T15:24:16,768 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-23T15:24:16,796 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 26469 (auto-detected) 2024-11-23T15:24:16,797 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected) 2024-11-23T15:24:16,817 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:35555 2024-11-23T15:24:16,825 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-23T15:24:16,827 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-23T15:24:16,840 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=master:35555 connecting to ZooKeeper ensemble=127.0.0.1:62881 2024-11-23T15:24:16,871 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:355550x0, quorum=127.0.0.1:62881, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-23T15:24:16,873 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:35555-0x10024fb5fcd0000 connected 2024-11-23T15:24:16,902 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:35555-0x10024fb5fcd0000, quorum=127.0.0.1:62881, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-23T15:24:16,905 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:35555-0x10024fb5fcd0000, quorum=127.0.0.1:62881, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-23T15:24:16,908 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:35555-0x10024fb5fcd0000, quorum=127.0.0.1:62881, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-23T15:24:16,912 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=35555 2024-11-23T15:24:16,912 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=35555 2024-11-23T15:24:16,913 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=35555 2024-11-23T15:24:16,914 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=35555 2024-11-23T15:24:16,916 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=35555 
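At this point the master's NettyRpcServer is bound to an ephemeral port (35555 in this run) and has registered its watchers in the mini ZooKeeper ensemble on client port 62881. Clients never dial the master port directly; they resolve the active master through ZooKeeper. A minimal client sketch under that assumption is below; the class name is invented, and the quorum address and client port are the ephemeral values from this particular run.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class ClientConnectSketch {
  public static void main(String[] args) throws Exception {
    // Clients locate the active master through ZooKeeper (the /hbase/master znode above),
    // so only the quorum and client port need to be configured here.
    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.zookeeper.quorum", "127.0.0.1");
    conf.setInt("hbase.zookeeper.property.clientPort", 62881); // ephemeral port from this run

    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      for (TableName table : admin.listTableNames()) {
        System.out.println("table: " + table);
      }
    }
  }
}
```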
2024-11-23T15:24:16,923 INFO [Time-limited test {}] master.HMaster(488): hbase.rootdir=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704, hbase.cluster.distributed=false 2024-11-23T15:24:16,986 INFO [Time-limited test {}] client.ConnectionUtils(129): regionserver/6a36843bf905:0 server-side Connection retries=45 2024-11-23T15:24:16,986 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-23T15:24:16,987 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-23T15:24:16,987 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-23T15:24:16,987 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-23T15:24:16,987 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-23T15:24:16,989 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-23T15:24:16,992 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-23T15:24:16,993 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:33811 2024-11-23T15:24:16,994 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-23T15:24:17,000 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-23T15:24:17,002 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-23T15:24:17,005 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-23T15:24:17,008 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=regionserver:33811 connecting to ZooKeeper ensemble=127.0.0.1:62881 2024-11-23T15:24:17,014 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:338110x0, quorum=127.0.0.1:62881, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-23T15:24:17,015 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:33811-0x10024fb5fcd0001 connected 2024-11-23T15:24:17,015 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33811-0x10024fb5fcd0001, quorum=127.0.0.1:62881, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-23T15:24:17,017 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33811-0x10024fb5fcd0001, 
quorum=127.0.0.1:62881, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-23T15:24:17,018 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33811-0x10024fb5fcd0001, quorum=127.0.0.1:62881, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-23T15:24:17,019 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=33811 2024-11-23T15:24:17,019 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=33811 2024-11-23T15:24:17,020 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=33811 2024-11-23T15:24:17,024 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=33811 2024-11-23T15:24:17,026 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=33811 2024-11-23T15:24:17,028 INFO [master/6a36843bf905:0:becomeActiveMaster {}] master.HMaster(2445): Adding backup master ZNode /hbase/backup-masters/6a36843bf905,35555,1732375456260 2024-11-23T15:24:17,034 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33811-0x10024fb5fcd0001, quorum=127.0.0.1:62881, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-23T15:24:17,034 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35555-0x10024fb5fcd0000, quorum=127.0.0.1:62881, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-23T15:24:17,036 DEBUG [master/6a36843bf905:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:35555-0x10024fb5fcd0000, quorum=127.0.0.1:62881, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/6a36843bf905,35555,1732375456260 2024-11-23T15:24:17,043 DEBUG [M:0;6a36843bf905:35555 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;6a36843bf905:35555 2024-11-23T15:24:17,058 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35555-0x10024fb5fcd0000, quorum=127.0.0.1:62881, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-23T15:24:17,058 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33811-0x10024fb5fcd0001, quorum=127.0.0.1:62881, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-23T15:24:17,058 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33811-0x10024fb5fcd0001, quorum=127.0.0.1:62881, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T15:24:17,058 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35555-0x10024fb5fcd0000, quorum=127.0.0.1:62881, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T15:24:17,059 DEBUG [master/6a36843bf905:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:35555-0x10024fb5fcd0000, quorum=127.0.0.1:62881, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-23T15:24:17,060 DEBUG [zk-event-processor-pool-0 
{}] zookeeper.ZKUtil(111): master:35555-0x10024fb5fcd0000, quorum=127.0.0.1:62881, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-23T15:24:17,060 INFO [master/6a36843bf905:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/6a36843bf905,35555,1732375456260 from backup master directory 2024-11-23T15:24:17,063 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35555-0x10024fb5fcd0000, quorum=127.0.0.1:62881, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/6a36843bf905,35555,1732375456260 2024-11-23T15:24:17,063 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33811-0x10024fb5fcd0001, quorum=127.0.0.1:62881, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-23T15:24:17,063 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35555-0x10024fb5fcd0000, quorum=127.0.0.1:62881, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-23T15:24:17,063 WARN [master/6a36843bf905:0:becomeActiveMaster {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-23T15:24:17,063 INFO [master/6a36843bf905:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=6a36843bf905,35555,1732375456260 2024-11-23T15:24:17,066 INFO [master/6a36843bf905:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-11-23T15:24:17,067 INFO [master/6a36843bf905:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-11-23T15:24:17,139 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073741826_1002 (size=42) 2024-11-23T15:24:17,142 DEBUG [master/6a36843bf905:0:becomeActiveMaster {}] util.FSUtils(639): Created cluster ID file at hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/hbase.id with ID: 8a6b7d9c-c804-4d8c-a29f-da47d7c5beff 2024-11-23T15:24:17,184 INFO [master/6a36843bf905:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-23T15:24:17,210 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35555-0x10024fb5fcd0000, quorum=127.0.0.1:62881, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T15:24:17,210 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33811-0x10024fb5fcd0001, quorum=127.0.0.1:62881, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T15:24:17,226 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073741827_1003 (size=196) 2024-11-23T15:24:17,243 INFO [master/6a36843bf905:0:becomeActiveMaster {}] region.MasterRegion(372): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, 
{NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}
2024-11-23T15:24:17,246 INFO [master/6a36843bf905:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000
2024-11-23T15:24:17,265 DEBUG [master/6a36843bf905:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(244): No decryptEncryptedDataEncryptionKey method in DFSClient, should be hadoop version with HDFS-12396
java.lang.NoSuchMethodException: org.apache.hadoop.hdfs.DFSClient.decryptEncryptedDataEncryptionKey(org.apache.hadoop.fs.FileEncryptionInfo)
at java.lang.Class.getDeclaredMethod(Class.java:2675) ~[?:?]
at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelperWithoutHDFS12396(FanOutOneBlockAsyncDFSOutputSaslHelper.java:183) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelper(FanOutOneBlockAsyncDFSOutputSaslHelper.java:242) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.<clinit>(FanOutOneBlockAsyncDFSOutputSaslHelper.java:253) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at java.lang.Class.forName0(Native Method) ~[?:?]
at java.lang.Class.forName(Class.java:375) ~[?:?]
at org.apache.hadoop.hbase.wal.AsyncFSWALProvider.load(AsyncFSWALProvider.java:147) ~[classes/:?]
at org.apache.hadoop.hbase.wal.WALFactory.getProviderClass(WALFactory.java:160) ~[classes/:?]
at org.apache.hadoop.hbase.wal.WALFactory.getProvider(WALFactory.java:200) ~[classes/:?]
at org.apache.hadoop.hbase.wal.WALFactory.<init>(WALFactory.java:232) ~[classes/:?]
at org.apache.hadoop.hbase.wal.WALFactory.<init>(WALFactory.java:207) ~[classes/:?]
at org.apache.hadoop.hbase.master.region.MasterRegion.create(MasterRegion.java:402) ~[classes/:?]
at org.apache.hadoop.hbase.master.region.MasterRegionFactory.create(MasterRegionFactory.java:135) ~[classes/:?]
at org.apache.hadoop.hbase.master.HMaster.finishActiveMasterInitialization(HMaster.java:973) ~[classes/:?]
at org.apache.hadoop.hbase.master.HMaster.startActiveMasterManager(HMaster.java:2470) ~[classes/:?]
at org.apache.hadoop.hbase.master.HMaster.lambda$run$0(HMaster.java:590) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.HMaster.lambda$run$1(HMaster.java:587) ~[classes/:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:24:17,270 INFO [master/6a36843bf905:0:becomeActiveMaster {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-23T15:24:17,304 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073741828_1004 (size=1189) 2024-11-23T15:24:17,724 INFO [master/6a36843bf905:0:becomeActiveMaster {}] regionserver.HRegion(7124): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/MasterData/data/master/store 2024-11-23T15:24:17,738 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073741829_1005 (size=34) 2024-11-23T15:24:18,145 INFO [master/6a36843bf905:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 
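The NoSuchMethodException above is expected: FanOutOneBlockAsyncDFSOutputSaslHelper probes DFSClient for decryptEncryptedDataEncryptionKey via reflection and falls back to the pre-HDFS-12396 code path when the method is absent. The sketch below shows that probe-and-fallback pattern in generic form; it is not HBase's actual code, the class and method names are invented, and String.isBlank (Java 11+) is used only as a stand-in for an optional API.

```java
import java.lang.reflect.Method;

public class ReflectionProbeSketch {

  /** Returns the optional method if the running library provides it, otherwise null. */
  static Method probe(Class<?> owner, String name, Class<?>... parameterTypes) {
    try {
      return owner.getDeclaredMethod(name, parameterTypes);
    } catch (NoSuchMethodException e) {
      // Same situation as the DEBUG line above: the method is absent in this version,
      // so the caller switches to a fallback code path instead of failing.
      return null;
    }
  }

  public static void main(String[] args) {
    // Stand-in probe against a JDK 11+ method; HBase probes org.apache.hadoop.hdfs.DFSClient
    // for decryptEncryptedDataEncryptionKey(FileEncryptionInfo) in the same way.
    Method m = probe(String.class, "isBlank");
    System.out.println(m != null ? "optional API available" : "using fallback code path");
  }
}
```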
2024-11-23T15:24:18,145 DEBUG [master/6a36843bf905:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T15:24:18,147 DEBUG [master/6a36843bf905:0:becomeActiveMaster {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-23T15:24:18,147 INFO [master/6a36843bf905:0:becomeActiveMaster {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-23T15:24:18,147 DEBUG [master/6a36843bf905:0:becomeActiveMaster {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-23T15:24:18,147 DEBUG [master/6a36843bf905:0:becomeActiveMaster {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-23T15:24:18,147 DEBUG [master/6a36843bf905:0:becomeActiveMaster {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-23T15:24:18,147 INFO [master/6a36843bf905:0:becomeActiveMaster {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-23T15:24:18,147 DEBUG [master/6a36843bf905:0:becomeActiveMaster {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682: 2024-11-23T15:24:18,150 WARN [master/6a36843bf905:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/MasterData/data/master/store/.initializing 2024-11-23T15:24:18,150 DEBUG [master/6a36843bf905:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/MasterData/WALs/6a36843bf905,35555,1732375456260 2024-11-23T15:24:18,157 INFO [master/6a36843bf905:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-23T15:24:18,167 INFO [master/6a36843bf905:0:becomeActiveMaster {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=6a36843bf905%2C35555%2C1732375456260, suffix=, logDir=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/MasterData/WALs/6a36843bf905,35555,1732375456260, archiveDir=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/MasterData/oldWALs, maxLogs=10 2024-11-23T15:24:18,190 DEBUG [master/6a36843bf905:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(617): When create output stream for /user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/MasterData/WALs/6a36843bf905,35555,1732375456260/6a36843bf905%2C35555%2C1732375456260.1732375458172, exclude list is [], retry=0 2024-11-23T15:24:18,206 DEBUG [RS-EventLoopGroup-3-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:33485,DS-5edd23db-b24f-4168-969f-4334cca64a02,DISK] 2024-11-23T15:24:18,209 DEBUG [RS-EventLoopGroup-3-2 {}] asyncfs.ProtobufDecoder(117): Hadoop 3.3 and above shades protobuf. 
2024-11-23T15:24:18,246 INFO [master/6a36843bf905:0:becomeActiveMaster {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/MasterData/WALs/6a36843bf905,35555,1732375456260/6a36843bf905%2C35555%2C1732375456260.1732375458172 2024-11-23T15:24:18,247 DEBUG [master/6a36843bf905:0:becomeActiveMaster {}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:43687:43687)] 2024-11-23T15:24:18,248 DEBUG [master/6a36843bf905:0:becomeActiveMaster {}] regionserver.HRegion(7285): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-23T15:24:18,248 DEBUG [master/6a36843bf905:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T15:24:18,252 DEBUG [master/6a36843bf905:0:becomeActiveMaster {}] regionserver.HRegion(7327): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-23T15:24:18,253 DEBUG [master/6a36843bf905:0:becomeActiveMaster {}] regionserver.HRegion(7330): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-23T15:24:18,294 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-23T15:24:18,320 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-23T15:24:18,324 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:18,327 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-23T15:24:18,327 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-23T15:24:18,331 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, 
maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-23T15:24:18,331 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:18,332 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T15:24:18,332 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-23T15:24:18,335 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-23T15:24:18,335 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:18,336 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T15:24:18,337 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-23T15:24:18,339 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 
2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-23T15:24:18,339 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:18,340 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T15:24:18,344 DEBUG [master/6a36843bf905:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-23T15:24:18,345 DEBUG [master/6a36843bf905:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-23T15:24:18,354 DEBUG [master/6a36843bf905:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-23T15:24:18,358 DEBUG [master/6a36843bf905:0:becomeActiveMaster {}] regionserver.HRegion(1085): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-23T15:24:18,362 DEBUG [master/6a36843bf905:0:becomeActiveMaster {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-23T15:24:18,363 INFO [master/6a36843bf905:0:becomeActiveMaster {}] regionserver.HRegion(1102): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=64223839, jitterRate=-0.042990222573280334}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-23T15:24:18,367 DEBUG [master/6a36843bf905:0:becomeActiveMaster {}] regionserver.HRegion(1001): Region open journal for 1595e783b53d99cd5eef43b6debb2682: 2024-11-23T15:24:18,369 INFO [master/6a36843bf905:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-23T15:24:18,398 DEBUG [master/6a36843bf905:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1e262798, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T15:24:18,434 INFO [master/6a36843bf905:0:becomeActiveMaster {}] master.HMaster(882): No meta location available on zookeeper, skip migrating... 
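The FlushLargeStoresPolicy entry above notes that hbase.hregion.percolumnfamilyflush.size.lower.bound is not set in the master:store descriptor, so 32.0 M is used, and the region then opens with FlushLargeStoresPolicy{flushSizeLowerBound=33554432}. A minimal sketch of that fallback arithmetic, assuming the stock 128 MB memstore flush size and the four column families (info, proc, rs, state) opened in the preceding entries:

    public class FlushLowerBoundSketch {
      public static void main(String[] args) {
        long memstoreFlushSize = 128L * 1024 * 1024; // assumed hbase.hregion.memstore.flush.size default
        int numFamilies = 4;                         // info, proc, rs, state, per the store openers above
        long lowerBound = memstoreFlushSize / numFamilies;
        System.out.println(lowerBound);              // 33554432 bytes = 32.0 M, matching the log
      }
    }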
2024-11-23T15:24:18,446 INFO [master/6a36843bf905:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-23T15:24:18,446 INFO [master/6a36843bf905:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(633): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-23T15:24:18,448 INFO [master/6a36843bf905:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-23T15:24:18,450 INFO [master/6a36843bf905:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(653): Recovered RegionProcedureStore lease in 1 msec 2024-11-23T15:24:18,455 INFO [master/6a36843bf905:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(667): Loaded RegionProcedureStore in 5 msec 2024-11-23T15:24:18,455 INFO [master/6a36843bf905:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-23T15:24:18,482 INFO [master/6a36843bf905:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-23T15:24:18,494 DEBUG [master/6a36843bf905:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35555-0x10024fb5fcd0000, quorum=127.0.0.1:62881, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-23T15:24:18,496 DEBUG [master/6a36843bf905:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/balancer already deleted, retry=false 2024-11-23T15:24:18,498 INFO [master/6a36843bf905:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-23T15:24:18,499 DEBUG [master/6a36843bf905:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35555-0x10024fb5fcd0000, quorum=127.0.0.1:62881, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-23T15:24:18,501 DEBUG [master/6a36843bf905:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/normalizer already deleted, retry=false 2024-11-23T15:24:18,503 INFO [master/6a36843bf905:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-23T15:24:18,506 DEBUG [master/6a36843bf905:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35555-0x10024fb5fcd0000, quorum=127.0.0.1:62881, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-23T15:24:18,508 DEBUG [master/6a36843bf905:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/split already deleted, retry=false 2024-11-23T15:24:18,509 DEBUG [master/6a36843bf905:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35555-0x10024fb5fcd0000, quorum=127.0.0.1:62881, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-23T15:24:18,510 DEBUG [master/6a36843bf905:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/merge already deleted, retry=false 2024-11-23T15:24:18,520 DEBUG [master/6a36843bf905:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35555-0x10024fb5fcd0000, quorum=127.0.0.1:62881, 
baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-23T15:24:18,521 DEBUG [master/6a36843bf905:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-23T15:24:18,525 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35555-0x10024fb5fcd0000, quorum=127.0.0.1:62881, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-23T15:24:18,525 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33811-0x10024fb5fcd0001, quorum=127.0.0.1:62881, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-23T15:24:18,525 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35555-0x10024fb5fcd0000, quorum=127.0.0.1:62881, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T15:24:18,526 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33811-0x10024fb5fcd0001, quorum=127.0.0.1:62881, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T15:24:18,526 INFO [master/6a36843bf905:0:becomeActiveMaster {}] master.HMaster(826): Active/primary master=6a36843bf905,35555,1732375456260, sessionid=0x10024fb5fcd0000, setting cluster-up flag (Was=false) 2024-11-23T15:24:18,540 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33811-0x10024fb5fcd0001, quorum=127.0.0.1:62881, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T15:24:18,540 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35555-0x10024fb5fcd0000, quorum=127.0.0.1:62881, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T15:24:18,547 DEBUG [master/6a36843bf905:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-23T15:24:18,548 DEBUG [master/6a36843bf905:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=6a36843bf905,35555,1732375456260 2024-11-23T15:24:18,555 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35555-0x10024fb5fcd0000, quorum=127.0.0.1:62881, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T15:24:18,555 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33811-0x10024fb5fcd0001, quorum=127.0.0.1:62881, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T15:24:18,562 DEBUG [master/6a36843bf905:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-23T15:24:18,563 DEBUG [master/6a36843bf905:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=6a36843bf905,35555,1732375456260 2024-11-23T15:24:18,641 DEBUG [RS:0;6a36843bf905:33811 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;6a36843bf905:33811 2024-11-23T15:24:18,643 INFO 
[RS:0;6a36843bf905:33811 {}] regionserver.HRegionServer(1008): ClusterId : 8a6b7d9c-c804-4d8c-a29f-da47d7c5beff 2024-11-23T15:24:18,646 DEBUG [RS:0;6a36843bf905:33811 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-23T15:24:18,647 DEBUG [master/6a36843bf905:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT; InitMetaProcedure table=hbase:meta 2024-11-23T15:24:18,651 DEBUG [RS:0;6a36843bf905:33811 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-23T15:24:18,651 DEBUG [RS:0;6a36843bf905:33811 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-23T15:24:18,653 INFO [master/6a36843bf905:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(575): slop=0.2 2024-11-23T15:24:18,654 DEBUG [RS:0;6a36843bf905:33811 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-23T15:24:18,654 DEBUG [RS:0;6a36843bf905:33811 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1b2c0b21, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T15:24:18,656 INFO [master/6a36843bf905:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(294): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-23T15:24:18,656 DEBUG [RS:0;6a36843bf905:33811 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@a917812, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=6a36843bf905/172.17.0.2:0 2024-11-23T15:24:18,659 INFO [RS:0;6a36843bf905:33811 {}] regionserver.RegionServerCoprocessorHost(67): System coprocessor loading is enabled 2024-11-23T15:24:18,659 INFO [RS:0;6a36843bf905:33811 {}] regionserver.RegionServerCoprocessorHost(68): Table coprocessor loading is enabled 2024-11-23T15:24:18,659 DEBUG [RS:0;6a36843bf905:33811 {}] regionserver.HRegionServer(1090): About to register with Master. 
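At this point the region server is about to reportForDuty to the master. As a hedged client-side illustration (standard HBase 2.x Admin API; the connection settings are assumed to come from an hbase-site.xml on the classpath, not from this test harness), the resulting membership can be inspected with:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.ClusterMetrics;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class ListLiveServersSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          ClusterMetrics metrics = admin.getClusterMetrics();
          System.out.println("active master: " + metrics.getMasterName());
          System.out.println("live region servers: " + metrics.getLiveServerMetrics().keySet());
        }
      }
    }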
2024-11-23T15:24:18,661 INFO [RS:0;6a36843bf905:33811 {}] regionserver.HRegionServer(3073): reportForDuty to master=6a36843bf905,35555,1732375456260 with isa=6a36843bf905/172.17.0.2:33811, startcode=1732375456985 2024-11-23T15:24:18,661 DEBUG [master/6a36843bf905:0:becomeActiveMaster {}] balancer.RegionLocationFinder(146): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 6a36843bf905,35555,1732375456260 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-23T15:24:18,665 DEBUG [master/6a36843bf905:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/6a36843bf905:0, corePoolSize=5, maxPoolSize=5 2024-11-23T15:24:18,665 DEBUG [master/6a36843bf905:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/6a36843bf905:0, corePoolSize=5, maxPoolSize=5 2024-11-23T15:24:18,665 DEBUG [master/6a36843bf905:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/6a36843bf905:0, corePoolSize=5, maxPoolSize=5 2024-11-23T15:24:18,665 DEBUG [master/6a36843bf905:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/6a36843bf905:0, corePoolSize=5, maxPoolSize=5 2024-11-23T15:24:18,665 DEBUG [master/6a36843bf905:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/6a36843bf905:0, corePoolSize=10, maxPoolSize=10 2024-11-23T15:24:18,666 DEBUG [master/6a36843bf905:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/6a36843bf905:0, corePoolSize=1, maxPoolSize=1 2024-11-23T15:24:18,666 DEBUG [master/6a36843bf905:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/6a36843bf905:0, corePoolSize=2, maxPoolSize=2 2024-11-23T15:24:18,666 DEBUG [master/6a36843bf905:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/6a36843bf905:0, corePoolSize=1, maxPoolSize=1 2024-11-23T15:24:18,667 INFO [master/6a36843bf905:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732375488667 2024-11-23T15:24:18,669 INFO [master/6a36843bf905:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-23T15:24:18,671 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, locked=true; InitMetaProcedure table=hbase:meta 2024-11-23T15:24:18,671 INFO [master/6a36843bf905:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-23T15:24:18,671 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(75): BOOTSTRAP: creating hbase:meta region 2024-11-23T15:24:18,675 INFO [master/6a36843bf905:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-23T15:24:18,675 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:18,675 INFO [master/6a36843bf905:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-23T15:24:18,675 INFO [PEWorker-1 {}] util.FSTableDescriptors(133): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-23T15:24:18,676 INFO [master/6a36843bf905:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-23T15:24:18,676 INFO [master/6a36843bf905:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-23T15:24:18,676 DEBUG [RS:0;6a36843bf905:33811 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-23T15:24:18,677 INFO [master/6a36843bf905:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
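The FSTableDescriptors entry above prints the hbase:meta descriptor with per-family attributes such as BLOOMFILTER => 'ROWCOL', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', IN_MEMORY => 'true' and an 8 KB block size. hbase:meta itself is created internally as the log shows; as a hedged sketch, comparable attributes for an ordinary user table (the table and family names below are hypothetical) would be expressed through the public builder API:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class DescriptorSketch {
      public static void main(String[] args) {
        TableDescriptor td = TableDescriptorBuilder.newBuilder(TableName.valueOf("example_table"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                .setBloomFilterType(BloomType.ROWCOL)                 // BLOOMFILTER => 'ROWCOL'
                .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1) // DATA_BLOCK_ENCODING => 'ROW_INDEX_V1'
                .setInMemory(true)                                    // IN_MEMORY => 'true'
                .setBlocksize(8 * 1024)                               // BLOCKSIZE => '8192 B (8KB)'
                .build())
            .build();
        System.out.println(td);
      }
    }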
2024-11-23T15:24:18,678 INFO [master/6a36843bf905:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-23T15:24:18,679 INFO [master/6a36843bf905:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-23T15:24:18,679 INFO [master/6a36843bf905:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-23T15:24:18,682 INFO [master/6a36843bf905:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-23T15:24:18,683 INFO [master/6a36843bf905:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-23T15:24:18,685 DEBUG [master/6a36843bf905:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/6a36843bf905:0:becomeActiveMaster-HFileCleaner.large.0-1732375458684,5,FailOnTimeoutGroup] 2024-11-23T15:24:18,686 DEBUG [master/6a36843bf905:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/6a36843bf905:0:becomeActiveMaster-HFileCleaner.small.0-1732375458685,5,FailOnTimeoutGroup] 2024-11-23T15:24:18,686 INFO [master/6a36843bf905:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-23T15:24:18,686 INFO [master/6a36843bf905:0:becomeActiveMaster {}] master.HMaster(1680): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-23T15:24:18,687 INFO [master/6a36843bf905:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-23T15:24:18,688 INFO [master/6a36843bf905:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
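The HMaster entry above reports that reopening regions with very high storeFileRefCount is disabled unless hbase.regions.recovery.store.file.ref.count is given a value greater than 0. A minimal sketch of that lookup (the key name is taken from the log itself; in practice it would be set in hbase-site.xml on the master, and 3 is an arbitrary example value, not a recommendation):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class RefCountThresholdSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        conf.setInt("hbase.regions.recovery.store.file.ref.count", 3); // > 0 enables the feature, per the log message
        System.out.println(conf.getInt("hbase.regions.recovery.store.file.ref.count", 0));
      }
    }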
2024-11-23T15:24:18,688 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073741831_1007 (size=1039) 2024-11-23T15:24:18,717 INFO [RS-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60961, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-11-23T15:24:18,723 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35555 {}] master.ServerManager(332): Checking decommissioned status of RegionServer 6a36843bf905,33811,1732375456985 2024-11-23T15:24:18,725 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35555 {}] master.ServerManager(486): Registering regionserver=6a36843bf905,33811,1732375456985 2024-11-23T15:24:18,739 DEBUG [RS:0;6a36843bf905:33811 {}] regionserver.HRegionServer(1725): Config from master: hbase.rootdir=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704 2024-11-23T15:24:18,739 DEBUG [RS:0;6a36843bf905:33811 {}] regionserver.HRegionServer(1725): Config from master: fs.defaultFS=hdfs://localhost:40979 2024-11-23T15:24:18,739 DEBUG [RS:0;6a36843bf905:33811 {}] regionserver.HRegionServer(1725): Config from master: hbase.master.info.port=-1 2024-11-23T15:24:18,744 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35555-0x10024fb5fcd0000, quorum=127.0.0.1:62881, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-23T15:24:18,744 DEBUG [RS:0;6a36843bf905:33811 {}] zookeeper.ZKUtil(111): regionserver:33811-0x10024fb5fcd0001, quorum=127.0.0.1:62881, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/6a36843bf905,33811,1732375456985 2024-11-23T15:24:18,745 WARN [RS:0;6a36843bf905:33811 {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-23T15:24:18,745 INFO [RS:0;6a36843bf905:33811 {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-23T15:24:18,745 DEBUG [RS:0;6a36843bf905:33811 {}] regionserver.HRegionServer(2100): logDir=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/WALs/6a36843bf905,33811,1732375456985 2024-11-23T15:24:18,747 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [6a36843bf905,33811,1732375456985] 2024-11-23T15:24:18,758 DEBUG [RS:0;6a36843bf905:33811 {}] regionserver.Replication(140): Replication stats-in-log period=300 seconds 2024-11-23T15:24:18,770 INFO [RS:0;6a36843bf905:33811 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-23T15:24:18,782 INFO [RS:0;6a36843bf905:33811 {}] regionserver.MemStoreFlusher(130): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-23T15:24:18,785 INFO [RS:0;6a36843bf905:33811 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-23T15:24:18,785 INFO [RS:0;6a36843bf905:33811 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
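The MemStoreFlusher entry above reports globalMemStoreLimit=880 M with a low-water mark of 836 M. A hedged sketch of the usual relationship between the two (the 0.95 fraction is the assumed default of hbase.regionserver.global.memstore.size.lower.limit; the 880 M figure itself depends on the test JVM heap):

    public class MemStoreLimitSketch {
      public static void main(String[] args) {
        long globalLimit = 880L * 1024 * 1024;  // from the MemStoreFlusher log entry
        double lowerLimitFraction = 0.95;       // assumed default lower-limit fraction
        long lowMark = (long) (globalLimit * lowerLimitFraction);
        System.out.println(lowMark >> 20);      // 836 MB, matching globalMemStoreLimitLowMark above
      }
    }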
2024-11-23T15:24:18,786 INFO [RS:0;6a36843bf905:33811 {}] regionserver.HRegionServer$CompactionChecker(1988): CompactionChecker runs every PT1S 2024-11-23T15:24:18,793 INFO [RS:0;6a36843bf905:33811 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-23T15:24:18,793 DEBUG [RS:0;6a36843bf905:33811 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/6a36843bf905:0, corePoolSize=1, maxPoolSize=1 2024-11-23T15:24:18,793 DEBUG [RS:0;6a36843bf905:33811 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/6a36843bf905:0, corePoolSize=1, maxPoolSize=1 2024-11-23T15:24:18,794 DEBUG [RS:0;6a36843bf905:33811 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/6a36843bf905:0, corePoolSize=1, maxPoolSize=1 2024-11-23T15:24:18,794 DEBUG [RS:0;6a36843bf905:33811 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/6a36843bf905:0, corePoolSize=1, maxPoolSize=1 2024-11-23T15:24:18,794 DEBUG [RS:0;6a36843bf905:33811 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/6a36843bf905:0, corePoolSize=1, maxPoolSize=1 2024-11-23T15:24:18,794 DEBUG [RS:0;6a36843bf905:33811 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/6a36843bf905:0, corePoolSize=2, maxPoolSize=2 2024-11-23T15:24:18,794 DEBUG [RS:0;6a36843bf905:33811 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/6a36843bf905:0, corePoolSize=1, maxPoolSize=1 2024-11-23T15:24:18,794 DEBUG [RS:0;6a36843bf905:33811 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/6a36843bf905:0, corePoolSize=1, maxPoolSize=1 2024-11-23T15:24:18,795 DEBUG [RS:0;6a36843bf905:33811 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/6a36843bf905:0, corePoolSize=1, maxPoolSize=1 2024-11-23T15:24:18,795 DEBUG [RS:0;6a36843bf905:33811 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/6a36843bf905:0, corePoolSize=1, maxPoolSize=1 2024-11-23T15:24:18,795 DEBUG [RS:0;6a36843bf905:33811 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/6a36843bf905:0, corePoolSize=1, maxPoolSize=1 2024-11-23T15:24:18,795 DEBUG [RS:0;6a36843bf905:33811 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/6a36843bf905:0, corePoolSize=3, maxPoolSize=3 2024-11-23T15:24:18,795 DEBUG [RS:0;6a36843bf905:33811 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0, corePoolSize=3, maxPoolSize=3 2024-11-23T15:24:18,796 INFO [RS:0;6a36843bf905:33811 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-23T15:24:18,796 INFO [RS:0;6a36843bf905:33811 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-23T15:24:18,796 INFO [RS:0;6a36843bf905:33811 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
2024-11-23T15:24:18,796 INFO [RS:0;6a36843bf905:33811 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-23T15:24:18,796 INFO [RS:0;6a36843bf905:33811 {}] hbase.ChoreService(168): Chore ScheduledChore name=6a36843bf905,33811,1732375456985-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-23T15:24:18,816 INFO [RS:0;6a36843bf905:33811 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-23T15:24:18,818 INFO [RS:0;6a36843bf905:33811 {}] hbase.ChoreService(168): Chore ScheduledChore name=6a36843bf905,33811,1732375456985-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-23T15:24:18,843 INFO [RS:0;6a36843bf905:33811 {}] regionserver.Replication(204): 6a36843bf905,33811,1732375456985 started 2024-11-23T15:24:18,843 INFO [RS:0;6a36843bf905:33811 {}] regionserver.HRegionServer(1767): Serving as 6a36843bf905,33811,1732375456985, RpcServer on 6a36843bf905/172.17.0.2:33811, sessionid=0x10024fb5fcd0001 2024-11-23T15:24:18,844 DEBUG [RS:0;6a36843bf905:33811 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-23T15:24:18,844 DEBUG [RS:0;6a36843bf905:33811 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 6a36843bf905,33811,1732375456985 2024-11-23T15:24:18,844 DEBUG [RS:0;6a36843bf905:33811 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '6a36843bf905,33811,1732375456985' 2024-11-23T15:24:18,845 DEBUG [RS:0;6a36843bf905:33811 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-23T15:24:18,846 DEBUG [RS:0;6a36843bf905:33811 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-23T15:24:18,846 DEBUG [RS:0;6a36843bf905:33811 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-23T15:24:18,847 DEBUG [RS:0;6a36843bf905:33811 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-23T15:24:18,847 DEBUG [RS:0;6a36843bf905:33811 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 6a36843bf905,33811,1732375456985 2024-11-23T15:24:18,847 DEBUG [RS:0;6a36843bf905:33811 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '6a36843bf905,33811,1732375456985' 2024-11-23T15:24:18,847 DEBUG [RS:0;6a36843bf905:33811 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-23T15:24:18,847 DEBUG [RS:0;6a36843bf905:33811 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-23T15:24:18,848 DEBUG [RS:0;6a36843bf905:33811 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-23T15:24:18,848 INFO [RS:0;6a36843bf905:33811 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-23T15:24:18,848 INFO [RS:0;6a36843bf905:33811 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
2024-11-23T15:24:18,954 INFO [RS:0;6a36843bf905:33811 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-23T15:24:18,958 INFO [RS:0;6a36843bf905:33811 {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=6a36843bf905%2C33811%2C1732375456985, suffix=, logDir=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/WALs/6a36843bf905,33811,1732375456985, archiveDir=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/oldWALs, maxLogs=32 2024-11-23T15:24:18,978 DEBUG [RS:0;6a36843bf905:33811 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(617): When create output stream for /user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/WALs/6a36843bf905,33811,1732375456985/6a36843bf905%2C33811%2C1732375456985.1732375458960, exclude list is [], retry=0 2024-11-23T15:24:18,982 DEBUG [RS-EventLoopGroup-3-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:33485,DS-5edd23db-b24f-4168-969f-4334cca64a02,DISK] 2024-11-23T15:24:18,986 INFO [RS:0;6a36843bf905:33811 {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/WALs/6a36843bf905,33811,1732375456985/6a36843bf905%2C33811%2C1732375456985.1732375458960 2024-11-23T15:24:18,986 DEBUG [RS:0;6a36843bf905:33811 {}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:43687:43687)] 2024-11-23T15:24:19,090 INFO [PEWorker-1 {}] util.FSTableDescriptors(140): Updated hbase:meta table descriptor to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1039 2024-11-23T15:24:19,091 INFO [PEWorker-1 {}] regionserver.HRegion(7106): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704 2024-11-23T15:24:19,100 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073741833_1009 (size=32) 2024-11-23T15:24:19,503 DEBUG [PEWorker-1 {}] regionserver.HRegion(894): 
Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T15:24:19,505 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-23T15:24:19,508 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-23T15:24:19,508 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:19,509 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-23T15:24:19,510 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-23T15:24:19,512 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-23T15:24:19,512 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:19,513 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-23T15:24:19,513 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, 
prefetchOnOpen=false, for column family table of region 1588230740 2024-11-23T15:24:19,515 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-23T15:24:19,516 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:19,516 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-23T15:24:19,518 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/hbase/meta/1588230740 2024-11-23T15:24:19,519 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/hbase/meta/1588230740 2024-11-23T15:24:19,522 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
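The entries above open the hbase:meta stores (info, rep_barrier, table), and the entries that follow show the master writing region state rows into hbase:meta during assignment. As a hedged client-side illustration (standard client API; the connection is assumed to be configured for this cluster), those region rows can later be read back with an ordinary scan of the info family:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.ResultScanner;
    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MetaScanSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Table meta = conn.getTable(TableName.META_TABLE_NAME);
             ResultScanner scanner = meta.getScanner(new Scan().addFamily(Bytes.toBytes("info")))) {
          for (Result r : scanner) {
            System.out.println(Bytes.toStringBinary(r.getRow())); // one row per region of user/system tables
          }
        }
      }
    }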
2024-11-23T15:24:19,525 DEBUG [PEWorker-1 {}] regionserver.HRegion(1085): writing seq id for 1588230740 2024-11-23T15:24:19,529 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-23T15:24:19,530 INFO [PEWorker-1 {}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=61506014, jitterRate=-0.08348897099494934}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-23T15:24:19,532 DEBUG [PEWorker-1 {}] regionserver.HRegion(1001): Region open journal for 1588230740: 2024-11-23T15:24:19,532 DEBUG [PEWorker-1 {}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-11-23T15:24:19,532 INFO [PEWorker-1 {}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-11-23T15:24:19,532 DEBUG [PEWorker-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-11-23T15:24:19,532 DEBUG [PEWorker-1 {}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-23T15:24:19,532 DEBUG [PEWorker-1 {}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-11-23T15:24:19,533 INFO [PEWorker-1 {}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740 2024-11-23T15:24:19,533 DEBUG [PEWorker-1 {}] regionserver.HRegion(1635): Region close journal for 1588230740: 2024-11-23T15:24:19,536 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, locked=true; InitMetaProcedure table=hbase:meta 2024-11-23T15:24:19,536 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(107): Going to assign meta 2024-11-23T15:24:19,542 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-23T15:24:19,550 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-23T15:24:19,552 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-23T15:24:19,705 DEBUG [6a36843bf905:35555 {}] assignment.AssignmentManager(2444): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-23T15:24:19,710 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=6a36843bf905,33811,1732375456985 2024-11-23T15:24:19,715 INFO [PEWorker-4 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 6a36843bf905,33811,1732375456985, state=OPENING 2024-11-23T15:24:19,721 DEBUG [PEWorker-4 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-23T15:24:19,723 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35555-0x10024fb5fcd0000, quorum=127.0.0.1:62881, baseZNode=/hbase Received ZooKeeper Event, 
type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T15:24:19,723 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33811-0x10024fb5fcd0001, quorum=127.0.0.1:62881, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T15:24:19,724 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-23T15:24:19,724 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-23T15:24:19,725 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE; OpenRegionProcedure 1588230740, server=6a36843bf905,33811,1732375456985}] 2024-11-23T15:24:19,900 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:24:19,901 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-23T15:24:19,905 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59042, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-23T15:24:19,918 INFO [RS_OPEN_META-regionserver/6a36843bf905:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(135): Open hbase:meta,,1.1588230740 2024-11-23T15:24:19,918 INFO [RS_OPEN_META-regionserver/6a36843bf905:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-23T15:24:19,919 INFO [RS_OPEN_META-regionserver/6a36843bf905:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-11-23T15:24:19,922 INFO [RS_OPEN_META-regionserver/6a36843bf905:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=6a36843bf905%2C33811%2C1732375456985.meta, suffix=.meta, logDir=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/WALs/6a36843bf905,33811,1732375456985, archiveDir=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/oldWALs, maxLogs=32 2024-11-23T15:24:19,945 DEBUG [RS_OPEN_META-regionserver/6a36843bf905:0-0 {event_type=M_RS_OPEN_META, pid=3}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(617): When create output stream for /user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/WALs/6a36843bf905,33811,1732375456985/6a36843bf905%2C33811%2C1732375456985.meta.1732375459925.meta, exclude list is [], retry=0 2024-11-23T15:24:19,949 DEBUG [RS-EventLoopGroup-3-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:33485,DS-5edd23db-b24f-4168-969f-4334cca64a02,DISK] 2024-11-23T15:24:19,952 INFO [RS_OPEN_META-regionserver/6a36843bf905:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/WALs/6a36843bf905,33811,1732375456985/6a36843bf905%2C33811%2C1732375456985.meta.1732375459925.meta 2024-11-23T15:24:19,952 DEBUG [RS_OPEN_META-regionserver/6a36843bf905:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer 
with pipeline: [(127.0.0.1/127.0.0.1:43687:43687)] 2024-11-23T15:24:19,952 DEBUG [RS_OPEN_META-regionserver/6a36843bf905:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7285): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-23T15:24:19,954 DEBUG [RS_OPEN_META-regionserver/6a36843bf905:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-23T15:24:20,015 DEBUG [RS_OPEN_META-regionserver/6a36843bf905:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7999): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-23T15:24:20,020 INFO [RS_OPEN_META-regionserver/6a36843bf905:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(436): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-23T15:24:20,024 DEBUG [RS_OPEN_META-regionserver/6a36843bf905:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-23T15:24:20,025 DEBUG [RS_OPEN_META-regionserver/6a36843bf905:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T15:24:20,025 DEBUG [RS_OPEN_META-regionserver/6a36843bf905:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7327): checking encryption for 1588230740 2024-11-23T15:24:20,025 DEBUG [RS_OPEN_META-regionserver/6a36843bf905:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7330): checking classloading for 1588230740 2024-11-23T15:24:20,028 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-23T15:24:20,030 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-23T15:24:20,030 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:20,031 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-23T15:24:20,031 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created 
cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-23T15:24:20,033 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-23T15:24:20,033 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:20,034 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-23T15:24:20,034 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-23T15:24:20,035 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-23T15:24:20,035 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:20,036 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-23T15:24:20,037 DEBUG [RS_OPEN_META-regionserver/6a36843bf905:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/hbase/meta/1588230740 2024-11-23T15:24:20,040 DEBUG [RS_OPEN_META-regionserver/6a36843bf905:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under 
hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/hbase/meta/1588230740 2024-11-23T15:24:20,043 DEBUG [RS_OPEN_META-regionserver/6a36843bf905:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-23T15:24:20,046 DEBUG [RS_OPEN_META-regionserver/6a36843bf905:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1085): writing seq id for 1588230740 2024-11-23T15:24:20,047 INFO [RS_OPEN_META-regionserver/6a36843bf905:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=63153844, jitterRate=-0.05893439054489136}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-23T15:24:20,049 DEBUG [RS_OPEN_META-regionserver/6a36843bf905:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1001): Region open journal for 1588230740: 2024-11-23T15:24:20,056 INFO [RS_OPEN_META-regionserver/6a36843bf905:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732375459894 2024-11-23T15:24:20,068 DEBUG [RS_OPEN_META-regionserver/6a36843bf905:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-23T15:24:20,068 INFO [RS_OPEN_META-regionserver/6a36843bf905:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(164): Opened hbase:meta,,1.1588230740 2024-11-23T15:24:20,070 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=6a36843bf905,33811,1732375456985 2024-11-23T15:24:20,071 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 6a36843bf905,33811,1732375456985, state=OPEN 2024-11-23T15:24:20,077 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35555-0x10024fb5fcd0000, quorum=127.0.0.1:62881, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-23T15:24:20,077 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33811-0x10024fb5fcd0001, quorum=127.0.0.1:62881, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-23T15:24:20,077 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-23T15:24:20,077 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-23T15:24:20,081 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=3, resume processing ppid=2 2024-11-23T15:24:20,081 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=3, ppid=2, state=SUCCESS; OpenRegionProcedure 1588230740, server=6a36843bf905,33811,1732375456985 in 352 msec 2024-11-23T15:24:20,087 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=2, resume processing ppid=1 2024-11-23T15:24:20,087 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=2, ppid=1, state=SUCCESS; 
TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 540 msec 2024-11-23T15:24:20,092 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=1, state=SUCCESS; InitMetaProcedure table=hbase:meta in 1.4900 sec 2024-11-23T15:24:20,092 INFO [master/6a36843bf905:0:becomeActiveMaster {}] master.HMaster(1088): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732375460092, completionTime=-1 2024-11-23T15:24:20,093 INFO [master/6a36843bf905:0:becomeActiveMaster {}] master.ServerManager(907): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-23T15:24:20,093 DEBUG [master/6a36843bf905:0:becomeActiveMaster {}] assignment.AssignmentManager(1747): Joining cluster... 2024-11-23T15:24:20,132 DEBUG [hconnection-0x10bb86e4-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T15:24:20,135 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59046, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T15:24:20,145 INFO [master/6a36843bf905:0:becomeActiveMaster {}] assignment.AssignmentManager(1759): Number of RegionServers=1 2024-11-23T15:24:20,145 INFO [master/6a36843bf905:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732375520145 2024-11-23T15:24:20,146 INFO [master/6a36843bf905:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732375580145 2024-11-23T15:24:20,146 INFO [master/6a36843bf905:0:becomeActiveMaster {}] assignment.AssignmentManager(1766): Joined the cluster in 52 msec 2024-11-23T15:24:20,167 INFO [master/6a36843bf905:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=6a36843bf905,35555,1732375456260-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-23T15:24:20,168 INFO [master/6a36843bf905:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=6a36843bf905,35555,1732375456260-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-23T15:24:20,168 INFO [master/6a36843bf905:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=6a36843bf905,35555,1732375456260-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-23T15:24:20,170 INFO [master/6a36843bf905:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-6a36843bf905:35555, period=300000, unit=MILLISECONDS is enabled. 2024-11-23T15:24:20,170 INFO [master/6a36843bf905:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-23T15:24:20,175 DEBUG [master/6a36843bf905:0.Chore.1 {}] janitor.CatalogJanitor(179): 2024-11-23T15:24:20,178 INFO [master/6a36843bf905:0:becomeActiveMaster {}] master.TableNamespaceManager(92): Namespace table not found. Creating... 
2024-11-23T15:24:20,180 INFO [master/6a36843bf905:0:becomeActiveMaster {}] master.HMaster(2425): Client=null/null create 'hbase:namespace', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-23T15:24:20,186 DEBUG [master/6a36843bf905:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=hbase:namespace 2024-11-23T15:24:20,189 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_PRE_OPERATION 2024-11-23T15:24:20,190 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:20,192 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-23T15:24:20,203 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073741835_1011 (size=358) 2024-11-23T15:24:20,607 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 7c4a395faaf6c8f523e4a2ccca6ed0d7, NAME => 'hbase:namespace,,1732375460179.7c4a395faaf6c8f523e4a2ccca6ed0d7.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:namespace', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704 2024-11-23T15:24:20,615 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073741836_1012 (size=42) 2024-11-23T15:24:21,017 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(894): Instantiated hbase:namespace,,1732375460179.7c4a395faaf6c8f523e4a2ccca6ed0d7.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T15:24:21,018 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1681): Closing 7c4a395faaf6c8f523e4a2ccca6ed0d7, disabling compactions & flushes 2024-11-23T15:24:21,018 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1703): Closing region hbase:namespace,,1732375460179.7c4a395faaf6c8f523e4a2ccca6ed0d7. 2024-11-23T15:24:21,018 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1732375460179.7c4a395faaf6c8f523e4a2ccca6ed0d7. 2024-11-23T15:24:21,018 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1732375460179.7c4a395faaf6c8f523e4a2ccca6ed0d7. 
after waiting 0 ms 2024-11-23T15:24:21,018 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1732375460179.7c4a395faaf6c8f523e4a2ccca6ed0d7. 2024-11-23T15:24:21,018 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1922): Closed hbase:namespace,,1732375460179.7c4a395faaf6c8f523e4a2ccca6ed0d7. 2024-11-23T15:24:21,018 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1635): Region close journal for 7c4a395faaf6c8f523e4a2ccca6ed0d7: 2024-11-23T15:24:21,020 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ADD_TO_META 2024-11-23T15:24:21,027 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"hbase:namespace,,1732375460179.7c4a395faaf6c8f523e4a2ccca6ed0d7.","families":{"info":[{"qualifier":"regioninfo","vlen":41,"tag":[],"timestamp":"1732375461021"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732375461021"}]},"ts":"1732375461021"} 2024-11-23T15:24:21,051 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-11-23T15:24:21,053 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-23T15:24:21,056 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732375461053"}]},"ts":"1732375461053"} 2024-11-23T15:24:21,060 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLING in hbase:meta 2024-11-23T15:24:21,066 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=7c4a395faaf6c8f523e4a2ccca6ed0d7, ASSIGN}] 2024-11-23T15:24:21,069 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=7c4a395faaf6c8f523e4a2ccca6ed0d7, ASSIGN 2024-11-23T15:24:21,070 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:namespace, region=7c4a395faaf6c8f523e4a2ccca6ed0d7, ASSIGN; state=OFFLINE, location=6a36843bf905,33811,1732375456985; forceNewPlan=false, retain=false 2024-11-23T15:24:21,221 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=7c4a395faaf6c8f523e4a2ccca6ed0d7, regionState=OPENING, regionLocation=6a36843bf905,33811,1732375456985 2024-11-23T15:24:21,225 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE; OpenRegionProcedure 7c4a395faaf6c8f523e4a2ccca6ed0d7, server=6a36843bf905,33811,1732375456985}] 2024-11-23T15:24:21,379 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:24:21,385 INFO [RS_OPEN_PRIORITY_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(135): Open 
hbase:namespace,,1732375460179.7c4a395faaf6c8f523e4a2ccca6ed0d7. 2024-11-23T15:24:21,386 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7285): Opening region: {ENCODED => 7c4a395faaf6c8f523e4a2ccca6ed0d7, NAME => 'hbase:namespace,,1732375460179.7c4a395faaf6c8f523e4a2ccca6ed0d7.', STARTKEY => '', ENDKEY => ''} 2024-11-23T15:24:21,386 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table namespace 7c4a395faaf6c8f523e4a2ccca6ed0d7 2024-11-23T15:24:21,386 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(894): Instantiated hbase:namespace,,1732375460179.7c4a395faaf6c8f523e4a2ccca6ed0d7.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T15:24:21,387 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7327): checking encryption for 7c4a395faaf6c8f523e4a2ccca6ed0d7 2024-11-23T15:24:21,387 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7330): checking classloading for 7c4a395faaf6c8f523e4a2ccca6ed0d7 2024-11-23T15:24:21,389 INFO [StoreOpener-7c4a395faaf6c8f523e4a2ccca6ed0d7-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 7c4a395faaf6c8f523e4a2ccca6ed0d7 2024-11-23T15:24:21,392 INFO [StoreOpener-7c4a395faaf6c8f523e4a2ccca6ed0d7-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 7c4a395faaf6c8f523e4a2ccca6ed0d7 columnFamilyName info 2024-11-23T15:24:21,392 DEBUG [StoreOpener-7c4a395faaf6c8f523e4a2ccca6ed0d7-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:21,393 INFO [StoreOpener-7c4a395faaf6c8f523e4a2ccca6ed0d7-1 {}] regionserver.HStore(327): Store=7c4a395faaf6c8f523e4a2ccca6ed0d7/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T15:24:21,394 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/hbase/namespace/7c4a395faaf6c8f523e4a2ccca6ed0d7 2024-11-23T15:24:21,395 DEBUG 
[RS_OPEN_PRIORITY_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/hbase/namespace/7c4a395faaf6c8f523e4a2ccca6ed0d7 2024-11-23T15:24:21,398 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1085): writing seq id for 7c4a395faaf6c8f523e4a2ccca6ed0d7 2024-11-23T15:24:21,402 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/hbase/namespace/7c4a395faaf6c8f523e4a2ccca6ed0d7/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-23T15:24:21,403 INFO [RS_OPEN_PRIORITY_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1102): Opened 7c4a395faaf6c8f523e4a2ccca6ed0d7; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=70965601, jitterRate=0.05746985971927643}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-23T15:24:21,404 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1001): Region open journal for 7c4a395faaf6c8f523e4a2ccca6ed0d7: 2024-11-23T15:24:21,407 INFO [RS_OPEN_PRIORITY_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:namespace,,1732375460179.7c4a395faaf6c8f523e4a2ccca6ed0d7., pid=6, masterSystemTime=1732375461379 2024-11-23T15:24:21,410 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:namespace,,1732375460179.7c4a395faaf6c8f523e4a2ccca6ed0d7. 2024-11-23T15:24:21,410 INFO [RS_OPEN_PRIORITY_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(164): Opened hbase:namespace,,1732375460179.7c4a395faaf6c8f523e4a2ccca6ed0d7. 
2024-11-23T15:24:21,411 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=7c4a395faaf6c8f523e4a2ccca6ed0d7, regionState=OPEN, openSeqNum=2, regionLocation=6a36843bf905,33811,1732375456985 2024-11-23T15:24:21,419 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=6, resume processing ppid=5 2024-11-23T15:24:21,420 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=6, ppid=5, state=SUCCESS; OpenRegionProcedure 7c4a395faaf6c8f523e4a2ccca6ed0d7, server=6a36843bf905,33811,1732375456985 in 190 msec 2024-11-23T15:24:21,422 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=5, resume processing ppid=4 2024-11-23T15:24:21,423 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=5, ppid=4, state=SUCCESS; TransitRegionStateProcedure table=hbase:namespace, region=7c4a395faaf6c8f523e4a2ccca6ed0d7, ASSIGN in 353 msec 2024-11-23T15:24:21,424 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-23T15:24:21,424 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732375461424"}]},"ts":"1732375461424"} 2024-11-23T15:24:21,427 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLED in hbase:meta 2024-11-23T15:24:21,431 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_POST_OPERATION 2024-11-23T15:24:21,433 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=4, state=SUCCESS; CreateTableProcedure table=hbase:namespace in 1.2510 sec 2024-11-23T15:24:21,491 DEBUG [master/6a36843bf905:0:becomeActiveMaster {}] zookeeper.ZKUtil(113): master:35555-0x10024fb5fcd0000, quorum=127.0.0.1:62881, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/namespace 2024-11-23T15:24:21,493 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33811-0x10024fb5fcd0001, quorum=127.0.0.1:62881, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T15:24:21,493 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35555-0x10024fb5fcd0000, quorum=127.0.0.1:62881, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/namespace 2024-11-23T15:24:21,494 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35555-0x10024fb5fcd0000, quorum=127.0.0.1:62881, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T15:24:21,523 DEBUG [master/6a36843bf905:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=7, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=default 2024-11-23T15:24:21,539 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35555-0x10024fb5fcd0000, quorum=127.0.0.1:62881, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-11-23T15:24:21,545 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=7, state=SUCCESS; 
CreateNamespaceProcedure, namespace=default in 25 msec 2024-11-23T15:24:21,557 DEBUG [master/6a36843bf905:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=8, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=hbase 2024-11-23T15:24:21,568 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35555-0x10024fb5fcd0000, quorum=127.0.0.1:62881, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-11-23T15:24:21,573 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=8, state=SUCCESS; CreateNamespaceProcedure, namespace=hbase in 15 msec 2024-11-23T15:24:21,583 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35555-0x10024fb5fcd0000, quorum=127.0.0.1:62881, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/default 2024-11-23T15:24:21,586 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35555-0x10024fb5fcd0000, quorum=127.0.0.1:62881, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/hbase 2024-11-23T15:24:21,586 INFO [master/6a36843bf905:0:becomeActiveMaster {}] master.HMaster(1218): Master has completed initialization 4.522sec 2024-11-23T15:24:21,588 INFO [master/6a36843bf905:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-23T15:24:21,589 INFO [master/6a36843bf905:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-23T15:24:21,590 INFO [master/6a36843bf905:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-23T15:24:21,591 INFO [master/6a36843bf905:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-23T15:24:21,591 INFO [master/6a36843bf905:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-23T15:24:21,592 INFO [master/6a36843bf905:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=6a36843bf905,35555,1732375456260-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-23T15:24:21,592 INFO [master/6a36843bf905:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=6a36843bf905,35555,1732375456260-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-23T15:24:21,598 DEBUG [master/6a36843bf905:0:becomeActiveMaster {}] master.HMaster(1321): Balancer post startup initialization complete, took 0 seconds 2024-11-23T15:24:21,599 INFO [master/6a36843bf905:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-23T15:24:21,599 INFO [master/6a36843bf905:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=6a36843bf905,35555,1732375456260-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-23T15:24:21,646 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5e83c466 to 127.0.0.1:62881 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@39dee83f 2024-11-23T15:24:21,647 WARN [Time-limited test {}] client.ZKConnectionRegistry(90): ZKConnectionRegistry is deprecated. See https://hbase.apache.org/book.html#client.rpcconnectionregistry 2024-11-23T15:24:21,654 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@67b8b597, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T15:24:21,659 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-11-23T15:24:21,659 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-11-23T15:24:21,670 DEBUG [hconnection-0x4c09ef46-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T15:24:21,679 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59056, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T15:24:21,691 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1199): Minicluster is up; activeMaster=6a36843bf905,35555,1732375456260 2024-11-23T15:24:21,707 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testMixedAtomicity Thread=219, OpenFileDescriptor=444, MaxFileDescriptor=1048576, SystemLoadAverage=253, ProcessCount=11, AvailableMemoryMB=4487 2024-11-23T15:24:21,718 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-23T15:24:21,721 INFO [RS-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35540, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-23T15:24:21,727 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
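
[editor's note, illustrative sketch] The TableDescriptorChecker warning immediately above fires because the memstore flush size is 131072 bytes (128 KB), far below the usual 128 MB default. The snippet below is a hedged illustration of the two places such a tiny threshold can come from (a per-table MEMSTORE_FLUSHSIZE on the descriptor, or the configuration key quoted verbatim in the warning); it is not the test's actual source, and the class name is made up for the example.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class TinyFlushSizeExample {
    public static void main(String[] args) {
        // Option 1: per-table MEMSTORE_FLUSHSIZE on the table descriptor (illustrative).
        TableDescriptor td = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("TestAcidGuarantees"))
            .setMemStoreFlushSize(131072L) // 128 KB -- small enough to force very frequent flushes
            .build();

        // Option 2: the configuration key quoted in the warning above.
        Configuration conf = HBaseConfiguration.create();
        conf.setLong("hbase.hregion.memstore.flush.size", 131072L);

        System.out.println(td.getMemStoreFlushSize() + " / "
            + conf.getLong("hbase.hregion.memstore.flush.size", -1L));
    }
}
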
2024-11-23T15:24:21,751 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-23T15:24:21,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] procedure2.ProcedureExecutor(1098): Stored pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-11-23T15:24:21,756 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-11-23T15:24:21,757 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:21,759 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-23T15:24:21,759 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 9 2024-11-23T15:24:21,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-11-23T15:24:21,774 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073741837_1013 (size=963) 2024-11-23T15:24:21,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-11-23T15:24:22,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-11-23T15:24:22,178 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => d6bd711ee7b1117306956b276de6b58d, NAME => 'TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704 2024-11-23T15:24:22,187 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073741838_1014 (size=53) 2024-11-23T15:24:22,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-11-23T15:24:22,589 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T15:24:22,589 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing d6bd711ee7b1117306956b276de6b58d, disabling compactions & flushes 2024-11-23T15:24:22,590 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d. 2024-11-23T15:24:22,590 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d. 2024-11-23T15:24:22,590 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d. after waiting 0 ms 2024-11-23T15:24:22,590 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d. 2024-11-23T15:24:22,590 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d. 
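
[editor's note, illustrative sketch] The create request for 'TestAcidGuarantees' and the table descriptor echoed above correspond roughly to the Admin call sketched below. This is a minimal sketch, not the test's actual code: the connection setup and class name are illustrative, and only the attributes visible in the log are reproduced (the ADAPTIVE compacting-memstore table attribute, and families A/B/C with VERSIONS => '1' and 64 KB blocks).

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateTestAcidGuaranteesTable {
    public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            TableDescriptorBuilder table =
                TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"))
                    // Table attribute seen in the logged descriptor: ADAPTIVE in-memory compaction.
                    .setValue("hbase.hregion.compacting.memstore.type", "ADAPTIVE");
            for (String family : new String[] {"A", "B", "C"}) {
                table.setColumnFamily(
                    ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family))
                        .setMaxVersions(1)    // VERSIONS => '1'
                        .setBlocksize(65536)  // BLOCKSIZE => '65536 B (64KB)'
                        .build());
            }
            admin.createTable(table.build());
        }
    }
}
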
2024-11-23T15:24:22,590 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for d6bd711ee7b1117306956b276de6b58d: 2024-11-23T15:24:22,592 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-11-23T15:24:22,593 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1732375462592"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732375462592"}]},"ts":"1732375462592"} 2024-11-23T15:24:22,596 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-11-23T15:24:22,597 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-23T15:24:22,598 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732375462597"}]},"ts":"1732375462597"} 2024-11-23T15:24:22,600 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-11-23T15:24:22,605 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=d6bd711ee7b1117306956b276de6b58d, ASSIGN}] 2024-11-23T15:24:22,607 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=d6bd711ee7b1117306956b276de6b58d, ASSIGN 2024-11-23T15:24:22,608 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=d6bd711ee7b1117306956b276de6b58d, ASSIGN; state=OFFLINE, location=6a36843bf905,33811,1732375456985; forceNewPlan=false, retain=false 2024-11-23T15:24:22,759 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=d6bd711ee7b1117306956b276de6b58d, regionState=OPENING, regionLocation=6a36843bf905,33811,1732375456985 2024-11-23T15:24:22,763 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=11, ppid=10, state=RUNNABLE; OpenRegionProcedure d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985}] 2024-11-23T15:24:22,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-11-23T15:24:22,916 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:24:22,922 INFO [RS_OPEN_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d. 
2024-11-23T15:24:22,923 DEBUG [RS_OPEN_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7285): Opening region: {ENCODED => d6bd711ee7b1117306956b276de6b58d, NAME => 'TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.', STARTKEY => '', ENDKEY => ''} 2024-11-23T15:24:22,923 DEBUG [RS_OPEN_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees d6bd711ee7b1117306956b276de6b58d 2024-11-23T15:24:22,923 DEBUG [RS_OPEN_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T15:24:22,923 DEBUG [RS_OPEN_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7327): checking encryption for d6bd711ee7b1117306956b276de6b58d 2024-11-23T15:24:22,923 DEBUG [RS_OPEN_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7330): checking classloading for d6bd711ee7b1117306956b276de6b58d 2024-11-23T15:24:22,926 INFO [StoreOpener-d6bd711ee7b1117306956b276de6b58d-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region d6bd711ee7b1117306956b276de6b58d 2024-11-23T15:24:22,929 INFO [StoreOpener-d6bd711ee7b1117306956b276de6b58d-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-23T15:24:22,929 INFO [StoreOpener-d6bd711ee7b1117306956b276de6b58d-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d6bd711ee7b1117306956b276de6b58d columnFamilyName A 2024-11-23T15:24:22,929 DEBUG [StoreOpener-d6bd711ee7b1117306956b276de6b58d-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:22,930 INFO [StoreOpener-d6bd711ee7b1117306956b276de6b58d-1 {}] regionserver.HStore(327): Store=d6bd711ee7b1117306956b276de6b58d/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T15:24:22,930 INFO [StoreOpener-d6bd711ee7b1117306956b276de6b58d-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region d6bd711ee7b1117306956b276de6b58d 2024-11-23T15:24:22,932 INFO [StoreOpener-d6bd711ee7b1117306956b276de6b58d-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-23T15:24:22,933 INFO [StoreOpener-d6bd711ee7b1117306956b276de6b58d-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d6bd711ee7b1117306956b276de6b58d columnFamilyName B 2024-11-23T15:24:22,933 DEBUG [StoreOpener-d6bd711ee7b1117306956b276de6b58d-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:22,934 INFO [StoreOpener-d6bd711ee7b1117306956b276de6b58d-1 {}] regionserver.HStore(327): Store=d6bd711ee7b1117306956b276de6b58d/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T15:24:22,934 INFO [StoreOpener-d6bd711ee7b1117306956b276de6b58d-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region d6bd711ee7b1117306956b276de6b58d 2024-11-23T15:24:22,936 INFO [StoreOpener-d6bd711ee7b1117306956b276de6b58d-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-23T15:24:22,936 INFO [StoreOpener-d6bd711ee7b1117306956b276de6b58d-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d6bd711ee7b1117306956b276de6b58d columnFamilyName C 2024-11-23T15:24:22,936 DEBUG [StoreOpener-d6bd711ee7b1117306956b276de6b58d-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:22,937 INFO [StoreOpener-d6bd711ee7b1117306956b276de6b58d-1 {}] regionserver.HStore(327): Store=d6bd711ee7b1117306956b276de6b58d/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T15:24:22,937 INFO [RS_OPEN_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d. 2024-11-23T15:24:22,939 DEBUG [RS_OPEN_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d 2024-11-23T15:24:22,939 DEBUG [RS_OPEN_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d 2024-11-23T15:24:22,942 DEBUG [RS_OPEN_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-23T15:24:22,944 DEBUG [RS_OPEN_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1085): writing seq id for d6bd711ee7b1117306956b276de6b58d 2024-11-23T15:24:22,947 DEBUG [RS_OPEN_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-23T15:24:22,948 INFO [RS_OPEN_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1102): Opened d6bd711ee7b1117306956b276de6b58d; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=70823115, jitterRate=0.05534665286540985}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-23T15:24:22,949 DEBUG [RS_OPEN_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1001): Region open journal for d6bd711ee7b1117306956b276de6b58d: 2024-11-23T15:24:22,950 INFO [RS_OPEN_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d., pid=11, masterSystemTime=1732375462916 2024-11-23T15:24:22,953 DEBUG [RS_OPEN_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d. 2024-11-23T15:24:22,953 INFO [RS_OPEN_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d. 
2024-11-23T15:24:22,954 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=d6bd711ee7b1117306956b276de6b58d, regionState=OPEN, openSeqNum=2, regionLocation=6a36843bf905,33811,1732375456985 2024-11-23T15:24:22,960 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=11, resume processing ppid=10 2024-11-23T15:24:22,960 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=11, ppid=10, state=SUCCESS; OpenRegionProcedure d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 in 194 msec 2024-11-23T15:24:22,963 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=10, resume processing ppid=9 2024-11-23T15:24:22,963 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=10, ppid=9, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=d6bd711ee7b1117306956b276de6b58d, ASSIGN in 355 msec 2024-11-23T15:24:22,965 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-23T15:24:22,965 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732375462965"}]},"ts":"1732375462965"} 2024-11-23T15:24:22,967 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-11-23T15:24:22,971 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-11-23T15:24:22,973 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=9, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.2200 sec 2024-11-23T15:24:23,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-11-23T15:24:23,883 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 9 completed 2024-11-23T15:24:23,889 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0e98ea32 to 127.0.0.1:62881 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3b9fcedf 2024-11-23T15:24:23,894 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3e71e468, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T15:24:23,896 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T15:24:23,898 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59110, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T15:24:23,901 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-23T15:24:23,903 INFO [RS-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43886, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-23T15:24:23,912 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x12885408 to 127.0.0.1:62881 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@9bd0964 2024-11-23T15:24:23,916 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6c63ae4e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T15:24:23,918 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x72b32f98 to 127.0.0.1:62881 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1324ee83 2024-11-23T15:24:23,922 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@736f1673, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T15:24:23,924 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x04977266 to 127.0.0.1:62881 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@45b55c24 2024-11-23T15:24:23,927 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4ee2166f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T15:24:23,929 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6bbb5d8a to 127.0.0.1:62881 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@48068a5 2024-11-23T15:24:23,933 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3f34ff67, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T15:24:23,935 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x18603bb9 to 127.0.0.1:62881 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3883f7b 2024-11-23T15:24:23,939 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1b5f27aa, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T15:24:23,941 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x72e97e4b to 127.0.0.1:62881 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@12a1285d 2024-11-23T15:24:23,945 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3c3b736e, compressor=null, tcpKeepAlive=true, 
tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T15:24:23,947 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x490457fd to 127.0.0.1:62881 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@527c6d40 2024-11-23T15:24:23,951 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@353bc462, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T15:24:23,953 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2c8de680 to 127.0.0.1:62881 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@47fe2fa7 2024-11-23T15:24:23,958 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6502d571, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T15:24:23,960 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6f6b07e3 to 127.0.0.1:62881 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@595e9ebe 2024-11-23T15:24:23,963 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2a0471b9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T15:24:23,969 DEBUG [hconnection-0x17b3816e-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T15:24:23,969 DEBUG [hconnection-0x4a22ee9a-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T15:24:23,970 DEBUG [hconnection-0xb857c1a-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T15:24:23,970 DEBUG [hconnection-0x624e92fc-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T15:24:23,970 DEBUG [hconnection-0x134cb240-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T15:24:23,971 DEBUG [hconnection-0x23bc3a57-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T15:24:23,972 DEBUG [hconnection-0x34e8847a-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T15:24:23,974 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59116, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T15:24:23,976 DEBUG [hconnection-0x11fa2cd8-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 
2024-11-23T15:24:23,976 DEBUG [hconnection-0x7c3cfe41-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T15:24:23,977 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-23T15:24:23,977 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59118, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T15:24:23,978 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59132, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T15:24:23,978 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59138, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T15:24:23,982 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59158, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T15:24:23,982 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59146, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T15:24:23,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] procedure2.ProcedureExecutor(1098): Stored pid=12, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees 2024-11-23T15:24:23,986 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=12, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-23T15:24:23,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-11-23T15:24:23,988 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=12, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-23T15:24:23,989 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=13, ppid=12, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-23T15:24:23,990 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59172, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T15:24:23,994 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59180, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T15:24:24,005 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59196, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T15:24:24,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(8581): Flush requested on d6bd711ee7b1117306956b276de6b58d 2024-11-23T15:24:24,070 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d6bd711ee7b1117306956b276de6b58d 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-11-23T15:24:24,078 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d6bd711ee7b1117306956b276de6b58d, store=A 2024-11-23T15:24:24,079 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:24:24,079 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d6bd711ee7b1117306956b276de6b58d, store=B 2024-11-23T15:24:24,079 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:24:24,080 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d6bd711ee7b1117306956b276de6b58d, store=C 2024-11-23T15:24:24,080 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:24:24,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-11-23T15:24:24,153 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:24:24,154 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-23T15:24:24,156 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d. 2024-11-23T15:24:24,156 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d. as already flushing 2024-11-23T15:24:24,157 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d. 2024-11-23T15:24:24,157 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => d6bd711ee7b1117306956b276de6b58d, NAME => 'TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T15:24:24,159 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => d6bd711ee7b1117306956b276de6b58d, NAME => 'TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:24:24,169 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/A/e579954fcc08481095cb741b433be2a4 is 50, key is test_row_0/A:col10/1732375464042/Put/seqid=0 2024-11-23T15:24:24,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d6bd711ee7b1117306956b276de6b58d, NAME => 'TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d6bd711ee7b1117306956b276de6b58d, NAME => 'TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:24:24,194 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:24,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 5 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59196 deadline: 1732375524182, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:24,197 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:24,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59132 deadline: 1732375524185, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:24,198 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:24,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59180 deadline: 1732375524185, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:24,199 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:24,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59158 deadline: 1732375524191, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:24,204 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:24,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59172 deadline: 1732375524195, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:24,229 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073741839_1015 (size=12001) 2024-11-23T15:24:24,233 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=16 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/A/e579954fcc08481095cb741b433be2a4 2024-11-23T15:24:24,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-11-23T15:24:24,325 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:24:24,326 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-23T15:24:24,329 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/B/ddb667c32fef4d9682377b5f2988ca2e is 50, key is test_row_0/B:col10/1732375464042/Put/seqid=0 2024-11-23T15:24:24,330 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d. 2024-11-23T15:24:24,331 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d. as already flushing 2024-11-23T15:24:24,331 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d. 
2024-11-23T15:24:24,331 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => d6bd711ee7b1117306956b276de6b58d, NAME => 'TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:24:24,335 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => d6bd711ee7b1117306956b276de6b58d, NAME => 'TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:24:24,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d6bd711ee7b1117306956b276de6b58d, NAME => 'TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d6bd711ee7b1117306956b276de6b58d, NAME => 'TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:24:24,355 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073741840_1016 (size=12001) 2024-11-23T15:24:24,356 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:24,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59180 deadline: 1732375524349, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:24,356 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:24,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59132 deadline: 1732375524349, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:24,357 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:24,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59196 deadline: 1732375524349, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:24,358 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:24,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59158 deadline: 1732375524350, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:24,358 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:24,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59172 deadline: 1732375524350, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:24,361 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=16 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/B/ddb667c32fef4d9682377b5f2988ca2e 2024-11-23T15:24:24,410 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/C/31e307a7b7a347cdb0fb130b2440d8f4 is 50, key is test_row_0/C:col10/1732375464042/Put/seqid=0 2024-11-23T15:24:24,433 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073741841_1017 (size=12001) 2024-11-23T15:24:24,434 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=16 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/C/31e307a7b7a347cdb0fb130b2440d8f4 2024-11-23T15:24:24,449 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/A/e579954fcc08481095cb741b433be2a4 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/e579954fcc08481095cb741b433be2a4 2024-11-23T15:24:24,472 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/e579954fcc08481095cb741b433be2a4, entries=150, sequenceid=16, filesize=11.7 K 2024-11-23T15:24:24,477 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/B/ddb667c32fef4d9682377b5f2988ca2e as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/ddb667c32fef4d9682377b5f2988ca2e 2024-11-23T15:24:24,489 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:24:24,490 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/ddb667c32fef4d9682377b5f2988ca2e, entries=150, sequenceid=16, filesize=11.7 K 2024-11-23T15:24:24,490 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-23T15:24:24,494 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d. 2024-11-23T15:24:24,497 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/C/31e307a7b7a347cdb0fb130b2440d8f4 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/31e307a7b7a347cdb0fb130b2440d8f4 2024-11-23T15:24:24,499 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d. as already flushing 2024-11-23T15:24:24,499 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d. 2024-11-23T15:24:24,499 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => d6bd711ee7b1117306956b276de6b58d, NAME => 'TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:24:24,499 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => d6bd711ee7b1117306956b276de6b58d, NAME => 'TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:24:24,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d6bd711ee7b1117306956b276de6b58d, NAME => 'TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d6bd711ee7b1117306956b276de6b58d, NAME => 'TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:24:24,510 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/31e307a7b7a347cdb0fb130b2440d8f4, entries=150, sequenceid=16, filesize=11.7 K 2024-11-23T15:24:24,512 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for d6bd711ee7b1117306956b276de6b58d in 442ms, sequenceid=16, compaction requested=false 2024-11-23T15:24:24,513 DEBUG [MemStoreFlusher.0 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestAcidGuarantees' 2024-11-23T15:24:24,515 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d6bd711ee7b1117306956b276de6b58d: 2024-11-23T15:24:24,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(8581): Flush requested on d6bd711ee7b1117306956b276de6b58d 2024-11-23T15:24:24,572 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d6bd711ee7b1117306956b276de6b58d 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-23T15:24:24,573 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d6bd711ee7b1117306956b276de6b58d, store=A 2024-11-23T15:24:24,573 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:24:24,573 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d6bd711ee7b1117306956b276de6b58d, store=B 2024-11-23T15:24:24,573 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:24:24,573 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d6bd711ee7b1117306956b276de6b58d, store=C 2024-11-23T15:24:24,574 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:24:24,590 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/A/ee891497a652468087b83876b6c44b7b is 50, key is test_row_0/A:col10/1732375464181/Put/seqid=0 2024-11-23T15:24:24,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-11-23T15:24:24,607 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073741842_1018 (size=12001) 2024-11-23T15:24:24,609 INFO 
[MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=40 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/A/ee891497a652468087b83876b6c44b7b 2024-11-23T15:24:24,621 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:24,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59158 deadline: 1732375524611, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:24,622 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:24,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59196 deadline: 1732375524611, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:24,624 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:24,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59172 deadline: 1732375524617, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:24,635 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:24,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59132 deadline: 1732375524622, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:24,635 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:24,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59180 deadline: 1732375524623, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:24,637 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/B/35941dfec8c24b19b14632f52495c6be is 50, key is test_row_0/B:col10/1732375464181/Put/seqid=0 2024-11-23T15:24:24,650 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073741843_1019 (size=12001) 2024-11-23T15:24:24,652 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=40 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/B/35941dfec8c24b19b14632f52495c6be 2024-11-23T15:24:24,653 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:24:24,654 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-23T15:24:24,654 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d. 2024-11-23T15:24:24,654 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d. as already flushing 2024-11-23T15:24:24,654 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d. 2024-11-23T15:24:24,654 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => d6bd711ee7b1117306956b276de6b58d, NAME => 'TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:24:24,655 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => d6bd711ee7b1117306956b276de6b58d, NAME => 'TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:24:24,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d6bd711ee7b1117306956b276de6b58d, NAME => 'TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d6bd711ee7b1117306956b276de6b58d, NAME => 'TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:24:24,684 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/C/09f513b327d84b02aa9a31d683d540c4 is 50, key is test_row_0/C:col10/1732375464181/Put/seqid=0 2024-11-23T15:24:24,714 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073741844_1020 (size=12001) 2024-11-23T15:24:24,715 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=40 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/C/09f513b327d84b02aa9a31d683d540c4 2024-11-23T15:24:24,728 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:24,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59196 deadline: 1732375524727, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:24,730 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:24,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59158 deadline: 1732375524730, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:24,732 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:24,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59172 deadline: 1732375524730, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:24,735 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/A/ee891497a652468087b83876b6c44b7b as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/ee891497a652468087b83876b6c44b7b 2024-11-23T15:24:24,749 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:24,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59180 deadline: 1732375524739, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:24,751 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/ee891497a652468087b83876b6c44b7b, entries=150, sequenceid=40, filesize=11.7 K 2024-11-23T15:24:24,752 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:24,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59132 deadline: 1732375524739, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:24,754 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/B/35941dfec8c24b19b14632f52495c6be as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/35941dfec8c24b19b14632f52495c6be 2024-11-23T15:24:24,763 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-23T15:24:24,764 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:namespace' 2024-11-23T15:24:24,769 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/35941dfec8c24b19b14632f52495c6be, entries=150, sequenceid=40, filesize=11.7 K 2024-11-23T15:24:24,772 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/C/09f513b327d84b02aa9a31d683d540c4 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/09f513b327d84b02aa9a31d683d540c4 2024-11-23T15:24:24,789 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/09f513b327d84b02aa9a31d683d540c4, entries=150, sequenceid=40, filesize=11.7 K 2024-11-23T15:24:24,791 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=67.09 KB/68700 for d6bd711ee7b1117306956b276de6b58d in 218ms, sequenceid=40, compaction requested=false 2024-11-23T15:24:24,791 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d6bd711ee7b1117306956b276de6b58d: 2024-11-23T15:24:24,809 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:24:24,809 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33811 {}] 
regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-23T15:24:24,810 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d. 2024-11-23T15:24:24,810 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2837): Flushing d6bd711ee7b1117306956b276de6b58d 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-23T15:24:24,810 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d6bd711ee7b1117306956b276de6b58d, store=A 2024-11-23T15:24:24,811 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:24:24,811 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d6bd711ee7b1117306956b276de6b58d, store=B 2024-11-23T15:24:24,811 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:24:24,811 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d6bd711ee7b1117306956b276de6b58d, store=C 2024-11-23T15:24:24,811 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:24:24,825 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/A/912b7f0cf1d942038d553477fdb845bc is 50, key is test_row_0/A:col10/1732375464615/Put/seqid=0 2024-11-23T15:24:24,842 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073741845_1021 (size=12001) 2024-11-23T15:24:24,846 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=53 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/A/912b7f0cf1d942038d553477fdb845bc 2024-11-23T15:24:24,871 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/B/83e0108851094f4481c52372dbddb08b is 50, key is test_row_0/B:col10/1732375464615/Put/seqid=0 2024-11-23T15:24:24,884 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073741846_1022 (size=12001) 
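Context for the repeated RegionTooBusyException entries above: the stack traces show HRegion.checkResources refusing writes once the region's memstore passes its blocking limit, which HBase derives from the configured memstore flush size times the block multiplier. The sketch below only illustrates that arithmetic and is not the test's actual configuration; the two property keys are standard HBase settings, while the 128 K value is an assumption chosen to reproduce the 512.0 K limit seen in the log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreBlockingLimit {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Assumed, test-style values; the actual settings behind this log are not shown in this excerpt.
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);   // flush threshold: 128 K
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);       // default multiplier

        long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
        int multiplier = conf.getInt("hbase.hregion.memstore.block.multiplier", 4);

        // Writes are refused with RegionTooBusyException ("Over memstore limit=...") once the
        // region's memstore exceeds flushSize * multiplier: 512 K with the values above.
        System.out.println("blocking memstore limit = " + (flushSize * multiplier) + " bytes");
    }
}

With stock defaults (128 MB flush size, multiplier 4) the same check would only fire at 512 MB, so the tiny limit here points to a deliberately small test configuration.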
2024-11-23T15:24:24,938 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d. as already flushing 2024-11-23T15:24:24,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(8581): Flush requested on d6bd711ee7b1117306956b276de6b58d 2024-11-23T15:24:24,995 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:24,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59132 deadline: 1732375524985, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:24,996 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:24,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59180 deadline: 1732375524989, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:24,997 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:24,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59196 deadline: 1732375524989, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:24,998 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:24,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59158 deadline: 1732375524992, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:24,999 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:24,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59172 deadline: 1732375524995, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:25,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-11-23T15:24:25,101 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:25,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59180 deadline: 1732375525099, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:25,102 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:25,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59196 deadline: 1732375525101, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:25,104 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:25,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59132 deadline: 1732375525102, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:25,105 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:25,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59172 deadline: 1732375525102, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:25,106 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:25,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59158 deadline: 1732375525104, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:25,286 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=53 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/B/83e0108851094f4481c52372dbddb08b 2024-11-23T15:24:25,308 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/C/bf8a3b7f443c43debfd21881d760379f is 50, key is test_row_0/C:col10/1732375464615/Put/seqid=0 2024-11-23T15:24:25,308 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:25,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59180 deadline: 1732375525305, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:25,310 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:25,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59196 deadline: 1732375525306, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:25,315 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:25,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59172 deadline: 1732375525310, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:25,316 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:25,316 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:25,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59158 deadline: 1732375525311, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:25,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59132 deadline: 1732375525310, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:25,330 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073741847_1023 (size=12001) 2024-11-23T15:24:25,332 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=53 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/C/bf8a3b7f443c43debfd21881d760379f 2024-11-23T15:24:25,346 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/A/912b7f0cf1d942038d553477fdb845bc as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/912b7f0cf1d942038d553477fdb845bc 2024-11-23T15:24:25,357 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/912b7f0cf1d942038d553477fdb845bc, entries=150, sequenceid=53, filesize=11.7 K 2024-11-23T15:24:25,359 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/B/83e0108851094f4481c52372dbddb08b as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/83e0108851094f4481c52372dbddb08b 2024-11-23T15:24:25,370 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/83e0108851094f4481c52372dbddb08b, entries=150, sequenceid=53, filesize=11.7 K 2024-11-23T15:24:25,373 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/C/bf8a3b7f443c43debfd21881d760379f as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/bf8a3b7f443c43debfd21881d760379f 2024-11-23T15:24:25,385 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/bf8a3b7f443c43debfd21881d760379f, entries=150, sequenceid=53, filesize=11.7 K 2024-11-23T15:24:25,387 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for d6bd711ee7b1117306956b276de6b58d in 577ms, sequenceid=53, compaction requested=true 2024-11-23T15:24:25,387 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2538): Flush status journal for d6bd711ee7b1117306956b276de6b58d: 2024-11-23T15:24:25,387 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d. 
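The pid=13 / ppid=12 entries in this stretch trace a master-side FlushTableProcedure for TestAcidGuarantees dispatching a per-region FlushRegionCallable to the region server; an earlier dispatch fails with "Unable to complete flush" because the region is already flushing, and a later one completes. A table flush of this kind is what client code requests through Admin.flush(TableName); the sketch below is a minimal, assumed-default example and is not taken from the test itself.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTestAcidGuarantees {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();   // picks up hbase-site.xml from the classpath
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            // Request a flush of every region of the table and wait for it to complete; on the
            // server side this shows up as the flush procedure traced in the log above.
            admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}

A flush request that arrives while the region is already flushing is simply rejected and retried, which matches the "NOT flushing ... as already flushing" and "Unable to complete flush" sequence earlier in this stretch.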
2024-11-23T15:24:25,387 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=13 2024-11-23T15:24:25,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4106): Remote procedure done, pid=13 2024-11-23T15:24:25,393 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=13, resume processing ppid=12 2024-11-23T15:24:25,393 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=13, ppid=12, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4010 sec 2024-11-23T15:24:25,396 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=12, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees in 1.4150 sec 2024-11-23T15:24:25,538 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-23T15:24:25,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(8581): Flush requested on d6bd711ee7b1117306956b276de6b58d 2024-11-23T15:24:25,621 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d6bd711ee7b1117306956b276de6b58d 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-23T15:24:25,622 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d6bd711ee7b1117306956b276de6b58d, store=A 2024-11-23T15:24:25,622 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:24:25,622 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d6bd711ee7b1117306956b276de6b58d, store=B 2024-11-23T15:24:25,622 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:24:25,622 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d6bd711ee7b1117306956b276de6b58d, store=C 2024-11-23T15:24:25,622 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:24:25,635 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/A/26c11edb4773456eb633defb4885fe9e is 50, key is test_row_0/A:col10/1732375464984/Put/seqid=0 2024-11-23T15:24:25,645 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:25,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59196 deadline: 1732375525637, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:25,647 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:25,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59180 deadline: 1732375525637, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:25,649 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:25,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59172 deadline: 1732375525641, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:25,650 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:25,650 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:25,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59132 deadline: 1732375525644, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:25,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59158 deadline: 1732375525646, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:25,657 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073741848_1024 (size=16681) 2024-11-23T15:24:25,750 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:25,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59196 deadline: 1732375525749, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:25,753 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:25,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59180 deadline: 1732375525751, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:25,754 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:25,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59172 deadline: 1732375525752, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:25,756 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:25,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59132 deadline: 1732375525752, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:25,758 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:25,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59158 deadline: 1732375525755, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:25,960 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:25,960 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:25,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59196 deadline: 1732375525954, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:25,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59180 deadline: 1732375525957, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:25,961 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:25,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59132 deadline: 1732375525960, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:25,963 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:25,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59172 deadline: 1732375525962, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:25,963 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:25,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59158 deadline: 1732375525962, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:26,059 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=78 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/A/26c11edb4773456eb633defb4885fe9e 2024-11-23T15:24:26,081 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/B/cc769daab3164dc6a1ef008fe117679c is 50, key is test_row_0/B:col10/1732375464984/Put/seqid=0 2024-11-23T15:24:26,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-11-23T15:24:26,098 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 12 completed 2024-11-23T15:24:26,100 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073741849_1025 (size=12001) 2024-11-23T15:24:26,101 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-23T15:24:26,103 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=78 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/B/cc769daab3164dc6a1ef008fe117679c 2024-11-23T15:24:26,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] procedure2.ProcedureExecutor(1098): Stored pid=14, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees 2024-11-23T15:24:26,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 
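The repeated RegionTooBusyException: Over memstore limit=512.0 K entries above are the region server refusing new writes while the region's memstore is over its blocking limit (the configured flush size times the block multiplier); they stop once the pending flush drains the memstore. A hedged sketch of an explicit backoff-and-retry around such a write follows. Note that the stock HBase client already retries this exception on its own, and depending on retry settings it may surface wrapped rather than directly, so the loop is purely illustrative and the names in it are examples:

    // Illustrative only: back off and retry a put that may hit
    // RegionTooBusyException while the region's memstore is over its blocking
    // limit. The HBase client normally handles this retry itself.
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;

    public final class BusyRegionRetry {
      static void putWithBackoff(Table table, Put put) throws Exception {
        long backoffMs = 100;
        for (int attempt = 0; attempt < 5; attempt++) {
          try {
            table.put(put);
            return;
          } catch (RegionTooBusyException e) {
            // Region is over its memstore limit; wait for the flush to catch up.
            Thread.sleep(backoffMs);
            backoffMs *= 2;
          }
        }
        throw new RuntimeException("put still failing after retries");
      }
    }
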
2024-11-23T15:24:26,106 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=14, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-23T15:24:26,108 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=14, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-23T15:24:26,108 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=15, ppid=14, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-23T15:24:26,140 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/C/bd6be0518dd448efab03260c4d0f92bd is 50, key is test_row_0/C:col10/1732375464984/Put/seqid=0 2024-11-23T15:24:26,160 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073741850_1026 (size=12001) 2024-11-23T15:24:26,173 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=78 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/C/bd6be0518dd448efab03260c4d0f92bd 2024-11-23T15:24:26,188 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/A/26c11edb4773456eb633defb4885fe9e as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/26c11edb4773456eb633defb4885fe9e 2024-11-23T15:24:26,201 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/26c11edb4773456eb633defb4885fe9e, entries=250, sequenceid=78, filesize=16.3 K 2024-11-23T15:24:26,203 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/B/cc769daab3164dc6a1ef008fe117679c as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/cc769daab3164dc6a1ef008fe117679c 2024-11-23T15:24:26,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-11-23T15:24:26,215 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/cc769daab3164dc6a1ef008fe117679c, entries=150, sequenceid=78, filesize=11.7 K 2024-11-23T15:24:26,217 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/C/bd6be0518dd448efab03260c4d0f92bd as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/bd6be0518dd448efab03260c4d0f92bd 2024-11-23T15:24:26,228 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/bd6be0518dd448efab03260c4d0f92bd, entries=150, sequenceid=78, filesize=11.7 K 2024-11-23T15:24:26,230 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for d6bd711ee7b1117306956b276de6b58d in 608ms, sequenceid=78, compaction requested=true 2024-11-23T15:24:26,230 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d6bd711ee7b1117306956b276de6b58d: 2024-11-23T15:24:26,244 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d6bd711ee7b1117306956b276de6b58d:A, priority=-2147483648, current under compaction store size is 1 2024-11-23T15:24:26,244 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T15:24:26,244 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d6bd711ee7b1117306956b276de6b58d:B, priority=-2147483648, current under compaction store size is 2 2024-11-23T15:24:26,244 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-23T15:24:26,244 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T15:24:26,244 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-23T15:24:26,244 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d6bd711ee7b1117306956b276de6b58d:C, priority=-2147483648, current under compaction store size is 3 2024-11-23T15:24:26,244 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T15:24:26,249 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48004 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-23T15:24:26,251 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1540): d6bd711ee7b1117306956b276de6b58d/B is initiating minor compaction (all files) 2024-11-23T15:24:26,251 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d6bd711ee7b1117306956b276de6b58d/B in TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d. 
2024-11-23T15:24:26,252 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/ddb667c32fef4d9682377b5f2988ca2e, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/35941dfec8c24b19b14632f52495c6be, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/83e0108851094f4481c52372dbddb08b, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/cc769daab3164dc6a1ef008fe117679c] into tmpdir=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp, totalSize=46.9 K 2024-11-23T15:24:26,253 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 52684 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-23T15:24:26,253 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HStore(1540): d6bd711ee7b1117306956b276de6b58d/A is initiating minor compaction (all files) 2024-11-23T15:24:26,254 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d6bd711ee7b1117306956b276de6b58d/A in TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d. 2024-11-23T15:24:26,254 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/e579954fcc08481095cb741b433be2a4, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/ee891497a652468087b83876b6c44b7b, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/912b7f0cf1d942038d553477fdb845bc, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/26c11edb4773456eb633defb4885fe9e] into tmpdir=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp, totalSize=51.4 K 2024-11-23T15:24:26,255 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting ddb667c32fef4d9682377b5f2988ca2e, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=16, earliestPutTs=1732375464042 2024-11-23T15:24:26,255 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.Compactor(224): Compacting e579954fcc08481095cb741b433be2a4, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=16, earliestPutTs=1732375464042 2024-11-23T15:24:26,256 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting 35941dfec8c24b19b14632f52495c6be, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=40, 
earliestPutTs=1732375464170 2024-11-23T15:24:26,257 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting 83e0108851094f4481c52372dbddb08b, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1732375464610 2024-11-23T15:24:26,257 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.Compactor(224): Compacting ee891497a652468087b83876b6c44b7b, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=40, earliestPutTs=1732375464170 2024-11-23T15:24:26,258 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 912b7f0cf1d942038d553477fdb845bc, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1732375464610 2024-11-23T15:24:26,258 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting cc769daab3164dc6a1ef008fe117679c, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1732375464984 2024-11-23T15:24:26,259 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 26c11edb4773456eb633defb4885fe9e, keycount=250, bloomtype=ROW, size=16.3 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1732375464984 2024-11-23T15:24:26,264 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:24:26,265 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-11-23T15:24:26,265 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d. 2024-11-23T15:24:26,266 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2837): Flushing d6bd711ee7b1117306956b276de6b58d 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-23T15:24:26,267 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d. 
as already flushing 2024-11-23T15:24:26,267 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d6bd711ee7b1117306956b276de6b58d, store=A 2024-11-23T15:24:26,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(8581): Flush requested on d6bd711ee7b1117306956b276de6b58d 2024-11-23T15:24:26,267 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:24:26,267 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d6bd711ee7b1117306956b276de6b58d, store=B 2024-11-23T15:24:26,267 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:24:26,267 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d6bd711ee7b1117306956b276de6b58d, store=C 2024-11-23T15:24:26,268 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:24:26,278 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/A/bf71f2056e424d7e8978d648f086ab93 is 50, key is test_row_0/A:col10/1732375466264/Put/seqid=0 2024-11-23T15:24:26,313 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d6bd711ee7b1117306956b276de6b58d#B#compaction#13 average throughput is 0.66 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T15:24:26,314 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/B/9e712ac02d7846768861d28baf8dd11f is 50, key is test_row_0/B:col10/1732375464984/Put/seqid=0 2024-11-23T15:24:26,322 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073741851_1027 (size=14341) 2024-11-23T15:24:26,325 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=90 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/A/bf71f2056e424d7e8978d648f086ab93 2024-11-23T15:24:26,336 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d6bd711ee7b1117306956b276de6b58d#A#compaction#14 average throughput is 0.47 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T15:24:26,338 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/A/018dd68209954569b79bc260523b9d24 is 50, key is test_row_0/A:col10/1732375464984/Put/seqid=0 2024-11-23T15:24:26,352 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073741852_1028 (size=12139) 2024-11-23T15:24:26,354 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/B/fc82dfb2bdad4139864ac1c331588b11 is 50, key is test_row_0/B:col10/1732375466264/Put/seqid=0 2024-11-23T15:24:26,369 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/B/9e712ac02d7846768861d28baf8dd11f as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/9e712ac02d7846768861d28baf8dd11f 2024-11-23T15:24:26,388 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073741853_1029 (size=12139) 2024-11-23T15:24:26,395 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in d6bd711ee7b1117306956b276de6b58d/B of d6bd711ee7b1117306956b276de6b58d into 9e712ac02d7846768861d28baf8dd11f(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
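The compaction entries above show four ~11.7 K store files per family being rewritten into a single ~11.9 K file under the exploring compaction policy. The flush, write-blocking, and compaction behaviour in this log is governed by a few standard HBase settings; the sketch below shows where they live, with example values only (the test itself evidently uses a far smaller flush size, given the 512.0 K blocking limit seen throughout):

    // Illustrative configuration sketch: the standard knobs behind the behaviour
    // in this log. Values are examples, not what the test actually uses.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class TuningExample {
      public static Configuration tuned() {
        Configuration conf = HBaseConfiguration.create();
        // Memstore size at which a flush is requested.
        conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
        // Writes are rejected with RegionTooBusyException once the memstore
        // reaches flush.size * block.multiplier.
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
        // Minimum number of store files before a minor compaction is considered.
        conf.setInt("hbase.hstore.compactionThreshold", 3);
        return conf;
      }
    }
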
2024-11-23T15:24:26,395 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d6bd711ee7b1117306956b276de6b58d: 2024-11-23T15:24:26,395 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d., storeName=d6bd711ee7b1117306956b276de6b58d/B, priority=12, startTime=1732375466244; duration=0sec 2024-11-23T15:24:26,396 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T15:24:26,396 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d6bd711ee7b1117306956b276de6b58d:B 2024-11-23T15:24:26,396 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073741854_1030 (size=12001) 2024-11-23T15:24:26,396 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-23T15:24:26,401 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=90 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/B/fc82dfb2bdad4139864ac1c331588b11 2024-11-23T15:24:26,405 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48004 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-23T15:24:26,406 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1540): d6bd711ee7b1117306956b276de6b58d/C is initiating minor compaction (all files) 2024-11-23T15:24:26,406 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d6bd711ee7b1117306956b276de6b58d/C in TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d. 
2024-11-23T15:24:26,406 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/31e307a7b7a347cdb0fb130b2440d8f4, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/09f513b327d84b02aa9a31d683d540c4, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/bf8a3b7f443c43debfd21881d760379f, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/bd6be0518dd448efab03260c4d0f92bd] into tmpdir=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp, totalSize=46.9 K 2024-11-23T15:24:26,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-11-23T15:24:26,409 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting 31e307a7b7a347cdb0fb130b2440d8f4, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=16, earliestPutTs=1732375464042 2024-11-23T15:24:26,410 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting 09f513b327d84b02aa9a31d683d540c4, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=40, earliestPutTs=1732375464170 2024-11-23T15:24:26,410 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/A/018dd68209954569b79bc260523b9d24 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/018dd68209954569b79bc260523b9d24 2024-11-23T15:24:26,411 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting bf8a3b7f443c43debfd21881d760379f, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1732375464610 2024-11-23T15:24:26,412 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting bd6be0518dd448efab03260c4d0f92bd, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1732375464984 2024-11-23T15:24:26,421 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:26,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59132 deadline: 1732375526403, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:26,422 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:26,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59158 deadline: 1732375526403, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:26,423 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:26,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59180 deadline: 1732375526404, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:26,424 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:26,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59172 deadline: 1732375526407, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:26,425 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-23T15:24:26,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59196 deadline: 1732375526408, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985
2024-11-23T15:24:26,432 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in d6bd711ee7b1117306956b276de6b58d/A of d6bd711ee7b1117306956b276de6b58d into 018dd68209954569b79bc260523b9d24(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-11-23T15:24:26,432 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d6bd711ee7b1117306956b276de6b58d:
2024-11-23T15:24:26,432 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d., storeName=d6bd711ee7b1117306956b276de6b58d/A, priority=12, startTime=1732375466231; duration=0sec
2024-11-23T15:24:26,433 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-23T15:24:26,433 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d6bd711ee7b1117306956b276de6b58d:A
2024-11-23T15:24:26,434 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/C/3ed30acab37b41cf9ba19b896bfdb1f1 is 50, key is test_row_0/C:col10/1732375466264/Put/seqid=0
2024-11-23T15:24:26,449 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d6bd711ee7b1117306956b276de6b58d#C#compaction#17 average throughput is 0.82 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-11-23T15:24:26,450 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/C/6b53822c33124fe58e937df549f25ce5 is 50, key is test_row_0/C:col10/1732375464984/Put/seqid=0
2024-11-23T15:24:26,468 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073741855_1031 (size=12001)
2024-11-23T15:24:26,470 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073741856_1032 (size=12139)
2024-11-23T15:24:26,471 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=90 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/C/3ed30acab37b41cf9ba19b896bfdb1f1
2024-11-23T15:24:26,484 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/C/6b53822c33124fe58e937df549f25ce5 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/6b53822c33124fe58e937df549f25ce5
2024-11-23T15:24:26,486 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/A/bf71f2056e424d7e8978d648f086ab93 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/bf71f2056e424d7e8978d648f086ab93
2024-11-23T15:24:26,499 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/bf71f2056e424d7e8978d648f086ab93, entries=200, sequenceid=90, filesize=14.0 K
2024-11-23T15:24:26,501 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/B/fc82dfb2bdad4139864ac1c331588b11 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/fc82dfb2bdad4139864ac1c331588b11
2024-11-23T15:24:26,501 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in d6bd711ee7b1117306956b276de6b58d/C of d6bd711ee7b1117306956b276de6b58d into 6b53822c33124fe58e937df549f25ce5(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-11-23T15:24:26,502 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d6bd711ee7b1117306956b276de6b58d:
2024-11-23T15:24:26,502 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d., storeName=d6bd711ee7b1117306956b276de6b58d/C, priority=12, startTime=1732375466244; duration=0sec
2024-11-23T15:24:26,502 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-23T15:24:26,502 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d6bd711ee7b1117306956b276de6b58d:C
2024-11-23T15:24:26,511 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/fc82dfb2bdad4139864ac1c331588b11, entries=150, sequenceid=90, filesize=11.7 K
2024-11-23T15:24:26,513 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/C/3ed30acab37b41cf9ba19b896bfdb1f1 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/3ed30acab37b41cf9ba19b896bfdb1f1
2024-11-23T15:24:26,525 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/3ed30acab37b41cf9ba19b896bfdb1f1, entries=150, sequenceid=90, filesize=11.7 K
2024-11-23T15:24:26,528 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=147.60 KB/151140 for d6bd711ee7b1117306956b276de6b58d in 262ms, sequenceid=90, compaction requested=false
2024-11-23T15:24:26,528 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2538): Flush status journal for d6bd711ee7b1117306956b276de6b58d:
2024-11-23T15:24:26,528 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.
2024-11-23T15:24:26,528 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=15
2024-11-23T15:24:26,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4106): Remote procedure done, pid=15
2024-11-23T15:24:26,529 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d6bd711ee7b1117306956b276de6b58d 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB
2024-11-23T15:24:26,529 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d6bd711ee7b1117306956b276de6b58d, store=A
2024-11-23T15:24:26,530 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-23T15:24:26,530 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d6bd711ee7b1117306956b276de6b58d, store=B
2024-11-23T15:24:26,530 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-23T15:24:26,530 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d6bd711ee7b1117306956b276de6b58d, store=C
2024-11-23T15:24:26,530 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-23T15:24:26,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(8581): Flush requested on d6bd711ee7b1117306956b276de6b58d
2024-11-23T15:24:26,541 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/A/288940ce4f7644058181039831910ec8 is 50, key is test_row_0/A:col10/1732375466355/Put/seqid=0
2024-11-23T15:24:26,548 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=15, resume processing ppid=14
2024-11-23T15:24:26,548 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=15, ppid=14, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 425 msec
2024-11-23T15:24:26,552 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=14, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees in 448 msec
2024-11-23T15:24:26,566 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073741857_1033 (size=12001)
2024-11-23T15:24:26,609 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-23T15:24:26,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59172 deadline: 1732375526596, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985
2024-11-23T15:24:26,610 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:26,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59196 deadline: 1732375526596, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:26,611 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:26,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59132 deadline: 1732375526597, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:26,612 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:26,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59180 deadline: 1732375526600, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:26,613 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:26,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59158 deadline: 1732375526601, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:26,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-11-23T15:24:26,710 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 14 completed 2024-11-23T15:24:26,714 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:26,714 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:26,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59196 deadline: 1732375526713, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:26,715 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-23T15:24:26,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59172 deadline: 1732375526714, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:26,715 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:26,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59132 deadline: 1732375526714, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:26,717 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:26,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59158 deadline: 1732375526717, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:26,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] procedure2.ProcedureExecutor(1098): Stored pid=16, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees 2024-11-23T15:24:26,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-11-23T15:24:26,724 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-23T15:24:26,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59180 deadline: 1732375526718, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985
2024-11-23T15:24:26,725 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=16, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE
2024-11-23T15:24:26,727 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=16, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-11-23T15:24:26,727 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=17, ppid=16, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-11-23T15:24:26,752 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta
2024-11-23T15:24:26,752 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer
2024-11-23T15:24:26,755 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_namespace
2024-11-23T15:24:26,755 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_namespace Metrics about Tables on a single HBase RegionServer
2024-11-23T15:24:26,757 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint
2024-11-23T15:24:26,757 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers
2024-11-23T15:24:26,757 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store
2024-11-23T15:24:26,757 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer
2024-11-23T15:24:26,759 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees
2024-11-23T15:24:26,761 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees Metrics about Tables on a single HBase RegionServer
2024-11-23T15:24:26,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16
2024-11-23T15:24:26,880 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985
2024-11-23T15:24:26,881 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17
2024-11-23T15:24:26,881 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.
2024-11-23T15:24:26,881 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d. as already flushing
2024-11-23T15:24:26,881 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.
2024-11-23T15:24:26,882 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17
java.io.IOException: Unable to complete flush {ENCODED => d6bd711ee7b1117306956b276de6b58d, NAME => 'TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.', STARTKEY => '', ENDKEY => ''}
at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-23T15:24:26,882 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => d6bd711ee7b1117306956b276de6b58d, NAME => 'TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:24:26,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d6bd711ee7b1117306956b276de6b58d, NAME => 'TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d6bd711ee7b1117306956b276de6b58d, NAME => 'TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:24:26,921 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:26,921 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:26,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59172 deadline: 1732375526917, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:26,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59132 deadline: 1732375526919, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:26,922 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:26,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59196 deadline: 1732375526919, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:26,923 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:26,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59158 deadline: 1732375526920, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:26,931 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:26,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59180 deadline: 1732375526929, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:26,968 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=119 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/A/288940ce4f7644058181039831910ec8 2024-11-23T15:24:26,994 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/B/0b2b1451ba524ff3a891b43e3e1ca7e7 is 50, key is test_row_0/B:col10/1732375466355/Put/seqid=0 2024-11-23T15:24:27,007 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073741858_1034 (size=12001) 2024-11-23T15:24:27,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-11-23T15:24:27,037 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:24:27,038 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-11-23T15:24:27,038 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d. 2024-11-23T15:24:27,038 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d. as already flushing 2024-11-23T15:24:27,038 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d. 
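The repeated RegionTooBusyException entries above come from HRegion.checkResources, which rejects writes while a region's memstore is above its blocking threshold (the "Over memstore limit=512.0 K" figure here reflects the deliberately small flush size this test runs with). Below is a minimal client-side sketch of backing off and retrying a put when that exception surfaces; the retry budget, sleep interval, and cell value are assumptions for illustration, and in practice the stock HBase client already performs similar retries internally, so this only makes the loop explicit.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionRetryExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      // Row, family and qualifier taken from the keys in the log (test_row_0/A:col10).
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      int attempts = 5;            // assumed retry budget
      long backoffMs = 200L;       // assumed backoff between attempts
      for (int i = 0; i < attempts; i++) {
        try {
          table.put(put);
          break;                   // write accepted, stop retrying
        } catch (RegionTooBusyException e) {
          if (i == attempts - 1) {
            throw e;               // budget exhausted, surface the error
          }
          Thread.sleep(backoffMs); // give the in-flight flush a chance to drain the memstore
        }
      }
    }
  }
}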
2024-11-23T15:24:27,038 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => d6bd711ee7b1117306956b276de6b58d, NAME => 'TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:24:27,039 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => d6bd711ee7b1117306956b276de6b58d, NAME => 'TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:24:27,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d6bd711ee7b1117306956b276de6b58d, NAME => 'TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d6bd711ee7b1117306956b276de6b58d, NAME => 'TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:24:27,192 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:24:27,194 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-11-23T15:24:27,194 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d. 2024-11-23T15:24:27,194 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d. as already flushing 2024-11-23T15:24:27,194 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d. 2024-11-23T15:24:27,195 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => d6bd711ee7b1117306956b276de6b58d, NAME => 'TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:24:27,195 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => d6bd711ee7b1117306956b276de6b58d, NAME => 'TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:24:27,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d6bd711ee7b1117306956b276de6b58d, NAME => 'TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d6bd711ee7b1117306956b276de6b58d, NAME => 'TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:24:27,226 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:27,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59132 deadline: 1732375527225, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:27,227 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:27,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59172 deadline: 1732375527226, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:27,231 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:27,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59158 deadline: 1732375527229, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:27,231 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:27,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59196 deadline: 1732375527230, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:27,239 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:27,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59180 deadline: 1732375527239, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:27,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-11-23T15:24:27,350 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:24:27,350 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-11-23T15:24:27,351 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d. 2024-11-23T15:24:27,351 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d. as already flushing 2024-11-23T15:24:27,351 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d. 2024-11-23T15:24:27,351 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => d6bd711ee7b1117306956b276de6b58d, NAME => 'TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:24:27,351 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => d6bd711ee7b1117306956b276de6b58d, NAME => 'TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:24:27,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d6bd711ee7b1117306956b276de6b58d, NAME => 'TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d6bd711ee7b1117306956b276de6b58d, NAME => 'TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:24:27,410 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=119 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/B/0b2b1451ba524ff3a891b43e3e1ca7e7 2024-11-23T15:24:27,434 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/C/53311e6fe2144d03b678581a8d617ed7 is 50, key is test_row_0/C:col10/1732375466355/Put/seqid=0 2024-11-23T15:24:27,443 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073741859_1035 (size=12001) 2024-11-23T15:24:27,446 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=119 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/C/53311e6fe2144d03b678581a8d617ed7 2024-11-23T15:24:27,459 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/A/288940ce4f7644058181039831910ec8 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/288940ce4f7644058181039831910ec8 2024-11-23T15:24:27,472 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/288940ce4f7644058181039831910ec8, entries=150, sequenceid=119, filesize=11.7 K 2024-11-23T15:24:27,475 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/B/0b2b1451ba524ff3a891b43e3e1ca7e7 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/0b2b1451ba524ff3a891b43e3e1ca7e7 2024-11-23T15:24:27,490 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/0b2b1451ba524ff3a891b43e3e1ca7e7, entries=150, sequenceid=119, filesize=11.7 K 2024-11-23T15:24:27,496 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/C/53311e6fe2144d03b678581a8d617ed7 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/53311e6fe2144d03b678581a8d617ed7 2024-11-23T15:24:27,506 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:24:27,506 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-11-23T15:24:27,506 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d. 2024-11-23T15:24:27,507 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d. as already flushing 2024-11-23T15:24:27,507 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d. 2024-11-23T15:24:27,507 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => d6bd711ee7b1117306956b276de6b58d, NAME => 'TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:24:27,507 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => d6bd711ee7b1117306956b276de6b58d, NAME => 'TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:24:27,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d6bd711ee7b1117306956b276de6b58d, NAME => 'TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d6bd711ee7b1117306956b276de6b58d, NAME => 'TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:24:27,509 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/53311e6fe2144d03b678581a8d617ed7, entries=150, sequenceid=119, filesize=11.7 K 2024-11-23T15:24:27,512 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for d6bd711ee7b1117306956b276de6b58d in 983ms, sequenceid=119, compaction requested=true 2024-11-23T15:24:27,513 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d6bd711ee7b1117306956b276de6b58d: 2024-11-23T15:24:27,513 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d6bd711ee7b1117306956b276de6b58d:A, priority=-2147483648, current under compaction store size is 1 2024-11-23T15:24:27,513 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T15:24:27,513 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T15:24:27,513 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d6bd711ee7b1117306956b276de6b58d:B, priority=-2147483648, current under compaction store size is 2 2024-11-23T15:24:27,513 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T15:24:27,513 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T15:24:27,513 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d6bd711ee7b1117306956b276de6b58d:C, priority=-2147483648, current under compaction store size is 3 2024-11-23T15:24:27,514 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T15:24:27,515 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36141 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T15:24:27,515 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 
38481 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T15:24:27,516 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1540): d6bd711ee7b1117306956b276de6b58d/B is initiating minor compaction (all files) 2024-11-23T15:24:27,516 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HStore(1540): d6bd711ee7b1117306956b276de6b58d/A is initiating minor compaction (all files) 2024-11-23T15:24:27,516 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d6bd711ee7b1117306956b276de6b58d/B in TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d. 2024-11-23T15:24:27,516 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d6bd711ee7b1117306956b276de6b58d/A in TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d. 2024-11-23T15:24:27,516 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/9e712ac02d7846768861d28baf8dd11f, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/fc82dfb2bdad4139864ac1c331588b11, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/0b2b1451ba524ff3a891b43e3e1ca7e7] into tmpdir=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp, totalSize=35.3 K 2024-11-23T15:24:27,516 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/018dd68209954569b79bc260523b9d24, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/bf71f2056e424d7e8978d648f086ab93, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/288940ce4f7644058181039831910ec8] into tmpdir=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp, totalSize=37.6 K 2024-11-23T15:24:27,517 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting 9e712ac02d7846768861d28baf8dd11f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1732375464984 2024-11-23T15:24:27,517 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 018dd68209954569b79bc260523b9d24, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1732375464984 2024-11-23T15:24:27,518 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting fc82dfb2bdad4139864ac1c331588b11, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=90, earliestPutTs=1732375465635 2024-11-23T15:24:27,518 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 
bf71f2056e424d7e8978d648f086ab93, keycount=200, bloomtype=ROW, size=14.0 K, encoding=NONE, compression=NONE, seqNum=90, earliestPutTs=1732375465635 2024-11-23T15:24:27,519 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 288940ce4f7644058181039831910ec8, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=119, earliestPutTs=1732375466355 2024-11-23T15:24:27,519 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting 0b2b1451ba524ff3a891b43e3e1ca7e7, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=119, earliestPutTs=1732375466355 2024-11-23T15:24:27,546 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d6bd711ee7b1117306956b276de6b58d#A#compaction#21 average throughput is 1.31 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T15:24:27,547 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/A/3c816e3608bf447d953f8a35abdb9934 is 50, key is test_row_0/A:col10/1732375466355/Put/seqid=0 2024-11-23T15:24:27,564 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073741860_1036 (size=12241) 2024-11-23T15:24:27,568 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d6bd711ee7b1117306956b276de6b58d#B#compaction#22 average throughput is 0.66 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T15:24:27,570 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/B/32324e433d5c4e5d86a36709f9687023 is 50, key is test_row_0/B:col10/1732375466355/Put/seqid=0 2024-11-23T15:24:27,576 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/A/3c816e3608bf447d953f8a35abdb9934 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/3c816e3608bf447d953f8a35abdb9934 2024-11-23T15:24:27,590 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d6bd711ee7b1117306956b276de6b58d/A of d6bd711ee7b1117306956b276de6b58d into 3c816e3608bf447d953f8a35abdb9934(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
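The two "Exploring compaction algorithm has selected 3 files ... with 1 in ratio" lines reflect the ratio test the exploring policy applies when choosing store files: a candidate set is only kept "in ratio" if no single file is disproportionately larger than the rest of the set. The snippet below is a simplified, stand-alone approximation of that check, not the actual ExploringCompactionPolicy code; the ratio value of 1.2 (the usual default for hbase.hstore.compaction.ratio) and the method names are assumptions, and the file sizes are the rounded per-file sizes reported in the Compactor lines above for store A.

import java.util.List;

public class CompactionRatioSketch {
  // Simplified stand-in for the exploring policy's "in ratio" test: every file
  // in a candidate selection must be no larger than ratio times the combined
  // size of the other files in that selection.
  static boolean filesInRatio(List<Double> fileSizesKb, double ratio) {
    double total = fileSizesKb.stream().mapToDouble(Double::doubleValue).sum();
    for (double size : fileSizesKb) {
      if (size > (total - size) * ratio) {
        return false; // one file dominates the selection, so this selection is skipped
      }
    }
    return true;
  }

  public static void main(String[] args) {
    // Rounded store-file sizes (KB) for store A: 11.9 K + 14.0 K + 11.7 K ~ 37.6 K total.
    List<Double> storeA = List.of(11.9, 14.0, 11.7);
    double ratio = 1.2; // assumed default compaction ratio
    System.out.println("store A selection in ratio? " + filesInRatio(storeA, ratio));
  }
}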
2024-11-23T15:24:27,590 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d6bd711ee7b1117306956b276de6b58d: 2024-11-23T15:24:27,590 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d., storeName=d6bd711ee7b1117306956b276de6b58d/A, priority=13, startTime=1732375467513; duration=0sec 2024-11-23T15:24:27,591 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T15:24:27,591 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d6bd711ee7b1117306956b276de6b58d:A 2024-11-23T15:24:27,592 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T15:24:27,594 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36141 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T15:24:27,595 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HStore(1540): d6bd711ee7b1117306956b276de6b58d/C is initiating minor compaction (all files) 2024-11-23T15:24:27,595 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d6bd711ee7b1117306956b276de6b58d/C in TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d. 2024-11-23T15:24:27,596 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/6b53822c33124fe58e937df549f25ce5, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/3ed30acab37b41cf9ba19b896bfdb1f1, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/53311e6fe2144d03b678581a8d617ed7] into tmpdir=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp, totalSize=35.3 K 2024-11-23T15:24:27,597 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6b53822c33124fe58e937df549f25ce5, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1732375464984 2024-11-23T15:24:27,598 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3ed30acab37b41cf9ba19b896bfdb1f1, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=90, earliestPutTs=1732375465635 2024-11-23T15:24:27,598 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 53311e6fe2144d03b678581a8d617ed7, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=119, earliestPutTs=1732375466355 2024-11-23T15:24:27,614 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:33485 is added to blk_1073741861_1037 (size=12241) 2024-11-23T15:24:27,632 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/B/32324e433d5c4e5d86a36709f9687023 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/32324e433d5c4e5d86a36709f9687023 2024-11-23T15:24:27,640 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d6bd711ee7b1117306956b276de6b58d#C#compaction#23 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T15:24:27,641 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/C/5611991eb11746ed9d8fcff6bbf0f134 is 50, key is test_row_0/C:col10/1732375466355/Put/seqid=0 2024-11-23T15:24:27,656 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d6bd711ee7b1117306956b276de6b58d/B of d6bd711ee7b1117306956b276de6b58d into 32324e433d5c4e5d86a36709f9687023(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T15:24:27,656 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d6bd711ee7b1117306956b276de6b58d: 2024-11-23T15:24:27,656 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d., storeName=d6bd711ee7b1117306956b276de6b58d/B, priority=13, startTime=1732375467513; duration=0sec 2024-11-23T15:24:27,656 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T15:24:27,656 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d6bd711ee7b1117306956b276de6b58d:B 2024-11-23T15:24:27,660 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:24:27,661 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-11-23T15:24:27,661 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d. 
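The pid=17 entries above show the master repeatedly dispatching FlushRegionCallable to the region server, which keeps answering "NOT flushing ... as already flushing" until the in-progress flush completes and the next dispatch (immediately below) can proceed. Such master-driven flush procedures are typically triggered by an administrative flush request like the sketch below; whether the test issued exactly this call is an assumption on my part, since the log only shows the resulting procedures.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushRequestExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Ask the master to flush every region of the table. The master turns this into
      // per-region flush procedures like pid=17 above; while a region is already
      // flushing, the remote procedure is retried until the region can comply.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}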
2024-11-23T15:24:27,662 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2837): Flushing d6bd711ee7b1117306956b276de6b58d 3/3 column families, dataSize=46.96 KB heapSize=123.80 KB 2024-11-23T15:24:27,662 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d6bd711ee7b1117306956b276de6b58d, store=A 2024-11-23T15:24:27,662 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:24:27,662 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d6bd711ee7b1117306956b276de6b58d, store=B 2024-11-23T15:24:27,662 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:24:27,663 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d6bd711ee7b1117306956b276de6b58d, store=C 2024-11-23T15:24:27,663 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:24:27,671 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/A/f7da8f646972432aa4b1918f8ea6f70c is 50, key is test_row_0/A:col10/1732375466595/Put/seqid=0 2024-11-23T15:24:27,678 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073741862_1038 (size=12241) 2024-11-23T15:24:27,686 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073741863_1039 (size=9657) 2024-11-23T15:24:27,687 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=131 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/A/f7da8f646972432aa4b1918f8ea6f70c 2024-11-23T15:24:27,722 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/B/4ca52b70305c4b78bb9c03679a78d73e is 50, key is test_row_0/B:col10/1732375466595/Put/seqid=0 2024-11-23T15:24:27,734 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d. 
as already flushing 2024-11-23T15:24:27,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(8581): Flush requested on d6bd711ee7b1117306956b276de6b58d 2024-11-23T15:24:27,747 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073741864_1040 (size=9657) 2024-11-23T15:24:27,786 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:27,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59132 deadline: 1732375527779, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:27,787 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:27,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59172 deadline: 1732375527782, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:27,788 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:27,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59196 deadline: 1732375527782, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:27,789 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:27,790 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:27,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59158 deadline: 1732375527784, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:27,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59180 deadline: 1732375527786, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:27,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-11-23T15:24:27,905 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:27,905 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:27,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59172 deadline: 1732375527901, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:27,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59132 deadline: 1732375527901, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:27,906 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:27,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59196 deadline: 1732375527902, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:27,906 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:27,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59180 deadline: 1732375527902, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:27,906 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:27,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59158 deadline: 1732375527902, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:28,089 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/C/5611991eb11746ed9d8fcff6bbf0f134 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/5611991eb11746ed9d8fcff6bbf0f134 2024-11-23T15:24:28,101 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d6bd711ee7b1117306956b276de6b58d/C of d6bd711ee7b1117306956b276de6b58d into 5611991eb11746ed9d8fcff6bbf0f134(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T15:24:28,101 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d6bd711ee7b1117306956b276de6b58d: 2024-11-23T15:24:28,101 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d., storeName=d6bd711ee7b1117306956b276de6b58d/C, priority=13, startTime=1732375467513; duration=0sec 2024-11-23T15:24:28,101 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T15:24:28,102 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d6bd711ee7b1117306956b276de6b58d:C 2024-11-23T15:24:28,109 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:28,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59132 deadline: 1732375528108, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:28,110 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:28,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59196 deadline: 1732375528108, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:28,110 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:28,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59158 deadline: 1732375528109, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:28,111 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:28,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59172 deadline: 1732375528110, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:28,113 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:28,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59180 deadline: 1732375528113, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:28,149 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=131 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/B/4ca52b70305c4b78bb9c03679a78d73e 2024-11-23T15:24:28,166 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/C/077fb6a3345747249969d4a4cbe13ecd is 50, key is test_row_0/C:col10/1732375466595/Put/seqid=0 2024-11-23T15:24:28,197 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073741865_1041 (size=9657) 2024-11-23T15:24:28,207 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=131 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/C/077fb6a3345747249969d4a4cbe13ecd 2024-11-23T15:24:28,219 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/A/f7da8f646972432aa4b1918f8ea6f70c as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/f7da8f646972432aa4b1918f8ea6f70c 2024-11-23T15:24:28,230 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/f7da8f646972432aa4b1918f8ea6f70c, entries=100, sequenceid=131, filesize=9.4 K 2024-11-23T15:24:28,232 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/B/4ca52b70305c4b78bb9c03679a78d73e as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/4ca52b70305c4b78bb9c03679a78d73e 2024-11-23T15:24:28,242 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/4ca52b70305c4b78bb9c03679a78d73e, entries=100, sequenceid=131, filesize=9.4 K 2024-11-23T15:24:28,244 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/C/077fb6a3345747249969d4a4cbe13ecd as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/077fb6a3345747249969d4a4cbe13ecd 2024-11-23T15:24:28,259 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/077fb6a3345747249969d4a4cbe13ecd, entries=100, sequenceid=131, filesize=9.4 K 2024-11-23T15:24:28,260 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(3040): Finished flush of dataSize ~46.96 KB/48090, heapSize ~123.75 KB/126720, currentSize=161.02 KB/164880 for d6bd711ee7b1117306956b276de6b58d in 599ms, sequenceid=131, compaction requested=false 2024-11-23T15:24:28,260 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2538): Flush status journal for d6bd711ee7b1117306956b276de6b58d: 2024-11-23T15:24:28,261 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d. 
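The flush above completes (~46.96 KB written across stores A, B and C at sequenceid=131) while concurrent writers keep being rejected with RegionTooBusyException, because the region's blocking memstore limit in this run is only 512.0 K, evidently tuned far below the production default (the limit is the configured flush size multiplied by hbase.hregion.memstore.block.multiplier). A client would normally let the built-in retry machinery (hbase.client.retries.number, hbase.client.pause) absorb these rejections; the sketch below is only a hand-rolled illustration of the same backoff idea, assuming client retries are configured low enough for the exception to reach the caller, and the row, family and value used are placeholders, not taken from the test.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BackoffPutExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      // Placeholder write; family "A" and qualifier "col10" mirror the log's key layout.
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 100;
      for (int attempt = 0; attempt < 5; attempt++) {
        try {
          table.put(put);          // may be rejected while the memstore is over its blocking limit
          break;                   // write accepted
        } catch (RegionTooBusyException e) {
          Thread.sleep(backoffMs); // back off so the in-flight flush can drain the memstore
          backoffMs *= 2;
        }
      }
    }
  }
}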
2024-11-23T15:24:28,261 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=17 2024-11-23T15:24:28,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4106): Remote procedure done, pid=17 2024-11-23T15:24:28,266 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=17, resume processing ppid=16 2024-11-23T15:24:28,266 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=17, ppid=16, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.5350 sec 2024-11-23T15:24:28,271 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=16, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees in 1.5520 sec 2024-11-23T15:24:28,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(8581): Flush requested on d6bd711ee7b1117306956b276de6b58d 2024-11-23T15:24:28,419 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d6bd711ee7b1117306956b276de6b58d 3/3 column families, dataSize=167.72 KB heapSize=440.20 KB 2024-11-23T15:24:28,422 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d6bd711ee7b1117306956b276de6b58d, store=A 2024-11-23T15:24:28,422 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:24:28,422 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d6bd711ee7b1117306956b276de6b58d, store=B 2024-11-23T15:24:28,422 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:24:28,422 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d6bd711ee7b1117306956b276de6b58d, store=C 2024-11-23T15:24:28,422 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:24:28,430 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/A/32b1e44119704d4fa575ab5bfe07f9c4 is 50, key is test_row_0/A:col10/1732375468417/Put/seqid=0 2024-11-23T15:24:28,432 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:28,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59172 deadline: 1732375528425, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:28,433 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:28,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59132 deadline: 1732375528431, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:28,434 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:28,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59196 deadline: 1732375528432, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:28,435 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:28,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59158 deadline: 1732375528432, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:28,438 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:28,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59180 deadline: 1732375528434, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:28,458 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073741866_1042 (size=12151) 2024-11-23T15:24:28,536 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:28,536 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:28,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59132 deadline: 1732375528535, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:28,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59172 deadline: 1732375528534, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:28,539 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:28,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59196 deadline: 1732375528537, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:28,540 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:28,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59158 deadline: 1732375528540, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:28,542 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:28,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59180 deadline: 1732375528541, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:28,740 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:28,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59132 deadline: 1732375528740, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:28,744 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:28,744 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:28,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59172 deadline: 1732375528742, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:28,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59158 deadline: 1732375528743, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:28,745 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:28,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59196 deadline: 1732375528744, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:28,748 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:28,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59180 deadline: 1732375528747, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:28,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-11-23T15:24:28,831 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 16 completed 2024-11-23T15:24:28,833 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-23T15:24:28,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] procedure2.ProcedureExecutor(1098): Stored pid=18, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees 2024-11-23T15:24:28,836 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=18, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-23T15:24:28,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-11-23T15:24:28,838 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=18, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-23T15:24:28,838 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=19, ppid=18, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-23T15:24:28,859 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=60.38 KB at sequenceid=162 
(bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/A/32b1e44119704d4fa575ab5bfe07f9c4 2024-11-23T15:24:28,874 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/B/3dfde1948e3841dd9c882f7bf96ced7b is 50, key is test_row_0/B:col10/1732375468417/Put/seqid=0 2024-11-23T15:24:28,884 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073741867_1043 (size=12151) 2024-11-23T15:24:28,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-11-23T15:24:28,993 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:24:28,995 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-11-23T15:24:28,995 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d. 2024-11-23T15:24:28,995 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d. as already flushing 2024-11-23T15:24:28,995 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d. 2024-11-23T15:24:28,995 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => d6bd711ee7b1117306956b276de6b58d, NAME => 'TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
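The repeated RegionTooBusyException warnings above come from HRegion.checkResources rejecting Mutate calls while the region's memstore sits over its blocking limit (512.0 K in this run) and the in-flight flush has not yet drained it. As a rough, hedged sketch only, and not code from this test, a writer could absorb those rejections with a bounded retry loop; the table name, row key, family and qualifier below are taken from the log, while the retry count and backoff values are arbitrary assumptions.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionRetryExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      // Row key, family "A" and qualifier "col10" appear in the flush output above.
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      int maxAttempts = 5;      // arbitrary choice for this sketch
      long backoffMs = 200;     // arbitrary starting backoff for this sketch
      for (int attempt = 1; attempt <= maxAttempts; attempt++) {
        try {
          table.put(put);       // rejected with RegionTooBusyException while over the memstore limit
          break;
        } catch (RegionTooBusyException e) {
          if (attempt == maxAttempts) {
            throw e;            // give up after the last attempt
          }
          Thread.sleep(backoffMs);  // wait for the flush to drain the memstore
          backoffMs *= 2;           // simple exponential backoff
        }
      }
    }
  }
}

In practice the stock HBase client already retries RegionTooBusyException internally, so an explicit loop like this mainly serves to make the blocking behaviour visible.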
2024-11-23T15:24:28,996 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => d6bd711ee7b1117306956b276de6b58d, NAME => 'TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:24:28,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d6bd711ee7b1117306956b276de6b58d, NAME => 'TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d6bd711ee7b1117306956b276de6b58d, NAME => 'TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:24:29,045 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:29,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59132 deadline: 1732375529045, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:29,048 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:29,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59196 deadline: 1732375529048, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:29,050 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:29,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59172 deadline: 1732375529049, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:29,050 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:29,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59158 deadline: 1732375529049, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:29,054 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:29,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59180 deadline: 1732375529054, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:29,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-11-23T15:24:29,150 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:24:29,151 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-11-23T15:24:29,151 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d. 2024-11-23T15:24:29,151 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d. as already flushing 2024-11-23T15:24:29,151 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d. 2024-11-23T15:24:29,151 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => d6bd711ee7b1117306956b276de6b58d, NAME => 'TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T15:24:29,152 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => d6bd711ee7b1117306956b276de6b58d, NAME => 'TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:24:29,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d6bd711ee7b1117306956b276de6b58d, NAME => 'TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d6bd711ee7b1117306956b276de6b58d, NAME => 'TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:24:29,284 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=60.38 KB at sequenceid=162 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/B/3dfde1948e3841dd9c882f7bf96ced7b 2024-11-23T15:24:29,301 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/C/4e1c27dbfc9e4f2d91e6ffd7fa7a66b1 is 50, key is test_row_0/C:col10/1732375468417/Put/seqid=0 2024-11-23T15:24:29,305 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:24:29,305 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-11-23T15:24:29,305 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d. 2024-11-23T15:24:29,306 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d. as already flushing 2024-11-23T15:24:29,306 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d. 2024-11-23T15:24:29,306 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => d6bd711ee7b1117306956b276de6b58d, NAME => 'TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
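The "Unable to complete flush ... as already flushing" errors above are the expected outcome of the master's FlushRegionProcedure (pid=19) repeatedly dispatching a FlushRegionCallable while the region is still busy with the flush it already started; the master keeps retrying until that flush finishes. For reference, a flush like the one logged as "Operation: FLUSH, Table Name: default:TestAcidGuarantees" can be requested through the Admin API; a minimal sketch, assuming a reachable cluster and with only the table name taken from the log:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Requests a flush of every region of the table; in this build it surfaces
      // on the master as the FlushTableProcedure (pid=18) seen above.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}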
2024-11-23T15:24:29,306 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => d6bd711ee7b1117306956b276de6b58d, NAME => 'TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:24:29,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d6bd711ee7b1117306956b276de6b58d, NAME => 'TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d6bd711ee7b1117306956b276de6b58d, NAME => 'TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:24:29,335 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073741868_1044 (size=12151) 2024-11-23T15:24:29,337 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=60.38 KB at sequenceid=162 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/C/4e1c27dbfc9e4f2d91e6ffd7fa7a66b1 2024-11-23T15:24:29,347 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/A/32b1e44119704d4fa575ab5bfe07f9c4 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/32b1e44119704d4fa575ab5bfe07f9c4 2024-11-23T15:24:29,355 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/32b1e44119704d4fa575ab5bfe07f9c4, entries=150, sequenceid=162, filesize=11.9 K 2024-11-23T15:24:29,357 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/B/3dfde1948e3841dd9c882f7bf96ced7b as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/3dfde1948e3841dd9c882f7bf96ced7b 2024-11-23T15:24:29,373 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/3dfde1948e3841dd9c882f7bf96ced7b, entries=150, sequenceid=162, filesize=11.9 K 2024-11-23T15:24:29,377 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/C/4e1c27dbfc9e4f2d91e6ffd7fa7a66b1 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/4e1c27dbfc9e4f2d91e6ffd7fa7a66b1 2024-11-23T15:24:29,393 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/4e1c27dbfc9e4f2d91e6ffd7fa7a66b1, entries=150, sequenceid=162, filesize=11.9 K 2024-11-23T15:24:29,395 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~181.14 KB/185490, heapSize ~475.31 KB/486720, currentSize=33.54 KB/34350 for d6bd711ee7b1117306956b276de6b58d in 976ms, sequenceid=162, compaction requested=true 2024-11-23T15:24:29,395 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 
d6bd711ee7b1117306956b276de6b58d: 2024-11-23T15:24:29,395 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d6bd711ee7b1117306956b276de6b58d:A, priority=-2147483648, current under compaction store size is 1 2024-11-23T15:24:29,396 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T15:24:29,396 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T15:24:29,396 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d6bd711ee7b1117306956b276de6b58d:B, priority=-2147483648, current under compaction store size is 2 2024-11-23T15:24:29,396 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T15:24:29,396 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T15:24:29,396 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d6bd711ee7b1117306956b276de6b58d:C, priority=-2147483648, current under compaction store size is 3 2024-11-23T15:24:29,396 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T15:24:29,398 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 34049 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T15:24:29,398 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HStore(1540): d6bd711ee7b1117306956b276de6b58d/A is initiating minor compaction (all files) 2024-11-23T15:24:29,398 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d6bd711ee7b1117306956b276de6b58d/A in TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d. 
2024-11-23T15:24:29,398 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/3c816e3608bf447d953f8a35abdb9934, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/f7da8f646972432aa4b1918f8ea6f70c, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/32b1e44119704d4fa575ab5bfe07f9c4] into tmpdir=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp, totalSize=33.3 K 2024-11-23T15:24:29,399 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 34049 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T15:24:29,399 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1540): d6bd711ee7b1117306956b276de6b58d/B is initiating minor compaction (all files) 2024-11-23T15:24:29,399 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d6bd711ee7b1117306956b276de6b58d/B in TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d. 2024-11-23T15:24:29,399 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/32324e433d5c4e5d86a36709f9687023, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/4ca52b70305c4b78bb9c03679a78d73e, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/3dfde1948e3841dd9c882f7bf96ced7b] into tmpdir=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp, totalSize=33.3 K 2024-11-23T15:24:29,400 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3c816e3608bf447d953f8a35abdb9934, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=119, earliestPutTs=1732375466355 2024-11-23T15:24:29,401 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting 32324e433d5c4e5d86a36709f9687023, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=119, earliestPutTs=1732375466355 2024-11-23T15:24:29,402 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.Compactor(224): Compacting f7da8f646972432aa4b1918f8ea6f70c, keycount=100, bloomtype=ROW, size=9.4 K, encoding=NONE, compression=NONE, seqNum=131, earliestPutTs=1732375466595 2024-11-23T15:24:29,403 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting 4ca52b70305c4b78bb9c03679a78d73e, keycount=100, bloomtype=ROW, size=9.4 K, encoding=NONE, compression=NONE, seqNum=131, earliestPutTs=1732375466595 2024-11-23T15:24:29,403 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] 
compactions.Compactor(224): Compacting 32b1e44119704d4fa575ab5bfe07f9c4, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=162, earliestPutTs=1732375467782 2024-11-23T15:24:29,404 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting 3dfde1948e3841dd9c882f7bf96ced7b, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=162, earliestPutTs=1732375467782 2024-11-23T15:24:29,426 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d6bd711ee7b1117306956b276de6b58d#A#compaction#30 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T15:24:29,427 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/A/5595114e44624010a6316ddc252afafd is 50, key is test_row_0/A:col10/1732375468417/Put/seqid=0 2024-11-23T15:24:29,438 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d6bd711ee7b1117306956b276de6b58d#B#compaction#31 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T15:24:29,438 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/B/3d724afe56f545578d002449e9a74ee4 is 50, key is test_row_0/B:col10/1732375468417/Put/seqid=0 2024-11-23T15:24:29,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-11-23T15:24:29,460 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:24:29,461 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-11-23T15:24:29,461 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d. 
2024-11-23T15:24:29,461 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2837): Flushing d6bd711ee7b1117306956b276de6b58d 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-11-23T15:24:29,462 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d6bd711ee7b1117306956b276de6b58d, store=A 2024-11-23T15:24:29,462 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:24:29,462 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d6bd711ee7b1117306956b276de6b58d, store=B 2024-11-23T15:24:29,462 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:24:29,462 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d6bd711ee7b1117306956b276de6b58d, store=C 2024-11-23T15:24:29,462 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:24:29,476 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073741869_1045 (size=12493) 2024-11-23T15:24:29,478 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/A/e0749eb2f0754d45b428588ce75e7545 is 50, key is test_row_0/A:col10/1732375468428/Put/seqid=0 2024-11-23T15:24:29,481 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073741870_1046 (size=12493) 2024-11-23T15:24:29,496 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/B/3d724afe56f545578d002449e9a74ee4 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/3d724afe56f545578d002449e9a74ee4 2024-11-23T15:24:29,509 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d6bd711ee7b1117306956b276de6b58d/B of d6bd711ee7b1117306956b276de6b58d into 3d724afe56f545578d002449e9a74ee4(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
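The compaction entries above show each store being picked up once it holds three flushed HFiles ("Selecting compaction from 3 store files, 0 compacting, 3 eligible"), which lines up with the usual minor-compaction trigger, and the earlier 512.0 K blocking limit is, in standard HBase configurations, the memstore flush size multiplied by the block multiplier, presumably set very low for this test. A minimal sketch of where these knobs live; the values shown are the stock defaults, not this test's settings:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class RegionTuningSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Per-region memstore flush size (default 128 MB); a flush is triggered at this size.
    conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
    // Writes are blocked with RegionTooBusyException once the memstore reaches
    // flush.size * block.multiplier (default multiplier 4).
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
    // Minimum number of store files before a minor compaction is considered (default 3),
    // matching the "3 store files ... 3 eligible" selection above.
    conf.setInt("hbase.hstore.compactionThreshold", 3);
    long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
        * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    System.out.println("blocking limit = " + blockingLimit + " bytes");
  }
}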
2024-11-23T15:24:29,510 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d6bd711ee7b1117306956b276de6b58d: 2024-11-23T15:24:29,510 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d., storeName=d6bd711ee7b1117306956b276de6b58d/B, priority=13, startTime=1732375469396; duration=0sec 2024-11-23T15:24:29,510 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T15:24:29,510 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d6bd711ee7b1117306956b276de6b58d:B 2024-11-23T15:24:29,510 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T15:24:29,513 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 34049 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T15:24:29,513 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1540): d6bd711ee7b1117306956b276de6b58d/C is initiating minor compaction (all files) 2024-11-23T15:24:29,513 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d6bd711ee7b1117306956b276de6b58d/C in TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d. 2024-11-23T15:24:29,513 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/5611991eb11746ed9d8fcff6bbf0f134, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/077fb6a3345747249969d4a4cbe13ecd, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/4e1c27dbfc9e4f2d91e6ffd7fa7a66b1] into tmpdir=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp, totalSize=33.3 K 2024-11-23T15:24:29,515 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting 5611991eb11746ed9d8fcff6bbf0f134, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=119, earliestPutTs=1732375466355 2024-11-23T15:24:29,516 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting 077fb6a3345747249969d4a4cbe13ecd, keycount=100, bloomtype=ROW, size=9.4 K, encoding=NONE, compression=NONE, seqNum=131, earliestPutTs=1732375466595 2024-11-23T15:24:29,516 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073741871_1047 (size=12151) 2024-11-23T15:24:29,517 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting 4e1c27dbfc9e4f2d91e6ffd7fa7a66b1, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, 
compression=NONE, seqNum=162, earliestPutTs=1732375467782 2024-11-23T15:24:29,517 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=170 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/A/e0749eb2f0754d45b428588ce75e7545 2024-11-23T15:24:29,533 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/B/7f5fa19be31d4d078338a9db7780371a is 50, key is test_row_0/B:col10/1732375468428/Put/seqid=0 2024-11-23T15:24:29,548 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d6bd711ee7b1117306956b276de6b58d#C#compaction#34 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T15:24:29,549 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/C/0581a228a8844d548d43d6fb97c66410 is 50, key is test_row_0/C:col10/1732375468417/Put/seqid=0 2024-11-23T15:24:29,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(8581): Flush requested on d6bd711ee7b1117306956b276de6b58d 2024-11-23T15:24:29,568 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d. as already flushing 2024-11-23T15:24:29,581 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073741873_1049 (size=12493) 2024-11-23T15:24:29,582 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073741872_1048 (size=12151) 2024-11-23T15:24:29,585 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=170 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/B/7f5fa19be31d4d078338a9db7780371a 2024-11-23T15:24:29,598 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/C/feac5bd9c6dc439dba5b85e8aba2d3ac is 50, key is test_row_0/C:col10/1732375468428/Put/seqid=0 2024-11-23T15:24:29,605 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073741874_1050 (size=12151) 2024-11-23T15:24:29,649 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:29,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59172 deadline: 1732375529645, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:29,650 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:29,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59196 deadline: 1732375529647, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:29,651 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:29,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59180 deadline: 1732375529647, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:29,652 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:29,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59158 deadline: 1732375529649, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:29,652 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:29,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59132 deadline: 1732375529650, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:29,753 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:29,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59172 deadline: 1732375529751, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:29,754 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:29,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59196 deadline: 1732375529752, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:29,754 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:29,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59158 deadline: 1732375529753, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:29,754 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:29,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59180 deadline: 1732375529754, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:29,755 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:29,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59132 deadline: 1732375529754, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:29,893 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/A/5595114e44624010a6316ddc252afafd as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/5595114e44624010a6316ddc252afafd 2024-11-23T15:24:29,912 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d6bd711ee7b1117306956b276de6b58d/A of d6bd711ee7b1117306956b276de6b58d into 5595114e44624010a6316ddc252afafd(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T15:24:29,912 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d6bd711ee7b1117306956b276de6b58d: 2024-11-23T15:24:29,912 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d., storeName=d6bd711ee7b1117306956b276de6b58d/A, priority=13, startTime=1732375469395; duration=0sec 2024-11-23T15:24:29,912 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T15:24:29,912 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d6bd711ee7b1117306956b276de6b58d:A 2024-11-23T15:24:29,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-11-23T15:24:29,957 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:29,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59172 deadline: 1732375529957, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:29,959 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:29,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59132 deadline: 1732375529957, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:29,960 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:29,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59196 deadline: 1732375529958, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:29,961 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:29,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59158 deadline: 1732375529958, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:29,961 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:29,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59180 deadline: 1732375529959, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:29,995 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/C/0581a228a8844d548d43d6fb97c66410 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/0581a228a8844d548d43d6fb97c66410 2024-11-23T15:24:30,006 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=170 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/C/feac5bd9c6dc439dba5b85e8aba2d3ac 2024-11-23T15:24:30,006 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d6bd711ee7b1117306956b276de6b58d/C of d6bd711ee7b1117306956b276de6b58d into 0581a228a8844d548d43d6fb97c66410(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T15:24:30,006 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d6bd711ee7b1117306956b276de6b58d: 2024-11-23T15:24:30,006 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d., storeName=d6bd711ee7b1117306956b276de6b58d/C, priority=13, startTime=1732375469396; duration=0sec 2024-11-23T15:24:30,006 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T15:24:30,006 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d6bd711ee7b1117306956b276de6b58d:C 2024-11-23T15:24:30,015 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/A/e0749eb2f0754d45b428588ce75e7545 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/e0749eb2f0754d45b428588ce75e7545 2024-11-23T15:24:30,022 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/e0749eb2f0754d45b428588ce75e7545, entries=150, sequenceid=170, filesize=11.9 K 2024-11-23T15:24:30,023 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/B/7f5fa19be31d4d078338a9db7780371a as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/7f5fa19be31d4d078338a9db7780371a 2024-11-23T15:24:30,030 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/7f5fa19be31d4d078338a9db7780371a, entries=150, sequenceid=170, filesize=11.9 K 2024-11-23T15:24:30,032 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/C/feac5bd9c6dc439dba5b85e8aba2d3ac as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/feac5bd9c6dc439dba5b85e8aba2d3ac 2024-11-23T15:24:30,041 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/feac5bd9c6dc439dba5b85e8aba2d3ac, entries=150, sequenceid=170, filesize=11.9 K 2024-11-23T15:24:30,042 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=174.43 KB/178620 for d6bd711ee7b1117306956b276de6b58d in 581ms, sequenceid=170, compaction requested=false 2024-11-23T15:24:30,042 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2538): Flush status journal for d6bd711ee7b1117306956b276de6b58d: 2024-11-23T15:24:30,042 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d. 2024-11-23T15:24:30,042 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=19 2024-11-23T15:24:30,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4106): Remote procedure done, pid=19 2024-11-23T15:24:30,047 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=19, resume processing ppid=18 2024-11-23T15:24:30,047 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=19, ppid=18, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.2060 sec 2024-11-23T15:24:30,049 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=18, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees in 1.2150 sec 2024-11-23T15:24:30,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(8581): Flush requested on d6bd711ee7b1117306956b276de6b58d 2024-11-23T15:24:30,265 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d6bd711ee7b1117306956b276de6b58d 3/3 column families, dataSize=181.14 KB heapSize=475.36 KB 2024-11-23T15:24:30,271 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d6bd711ee7b1117306956b276de6b58d, store=A 2024-11-23T15:24:30,271 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:24:30,271 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d6bd711ee7b1117306956b276de6b58d, store=B 2024-11-23T15:24:30,271 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:24:30,271 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d6bd711ee7b1117306956b276de6b58d, store=C 2024-11-23T15:24:30,271 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:24:30,273 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:30,273 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:30,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59196 deadline: 1732375530266, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:30,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59172 deadline: 1732375530266, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:30,274 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:30,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59132 deadline: 1732375530271, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:30,277 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:30,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59180 deadline: 1732375530274, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:30,278 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:30,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59158 deadline: 1732375530274, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:30,280 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/A/0f24632235464f9bad380ab1194c90c6 is 50, key is test_row_0/A:col10/1732375470265/Put/seqid=0 2024-11-23T15:24:30,311 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073741875_1051 (size=12151) 2024-11-23T15:24:30,313 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=64.85 KB at sequenceid=205 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/A/0f24632235464f9bad380ab1194c90c6 2024-11-23T15:24:30,327 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/B/bdcd6af87a774e3894b6d14b02cd1069 is 50, key is test_row_0/B:col10/1732375470265/Put/seqid=0 2024-11-23T15:24:30,345 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073741876_1052 (size=12151) 2024-11-23T15:24:30,362 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=64.85 KB at sequenceid=205 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/B/bdcd6af87a774e3894b6d14b02cd1069 2024-11-23T15:24:30,376 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/C/6d88af0660934234acf464d8ef56bd2b is 50, key is test_row_0/C:col10/1732375470265/Put/seqid=0 2024-11-23T15:24:30,379 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:30,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59172 deadline: 1732375530376, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:30,379 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:30,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59196 deadline: 1732375530376, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:30,380 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:30,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59132 deadline: 1732375530377, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:30,380 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:30,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59180 deadline: 1732375530379, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:30,383 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:30,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59158 deadline: 1732375530383, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:30,390 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073741877_1053 (size=12151) 2024-11-23T15:24:30,391 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=64.85 KB at sequenceid=205 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/C/6d88af0660934234acf464d8ef56bd2b 2024-11-23T15:24:30,400 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/A/0f24632235464f9bad380ab1194c90c6 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/0f24632235464f9bad380ab1194c90c6 2024-11-23T15:24:30,409 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/0f24632235464f9bad380ab1194c90c6, entries=150, sequenceid=205, filesize=11.9 K 2024-11-23T15:24:30,411 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/B/bdcd6af87a774e3894b6d14b02cd1069 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/bdcd6af87a774e3894b6d14b02cd1069 2024-11-23T15:24:30,420 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/bdcd6af87a774e3894b6d14b02cd1069, entries=150, sequenceid=205, filesize=11.9 K 2024-11-23T15:24:30,422 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/C/6d88af0660934234acf464d8ef56bd2b as 
hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/6d88af0660934234acf464d8ef56bd2b 2024-11-23T15:24:30,435 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/6d88af0660934234acf464d8ef56bd2b, entries=150, sequenceid=205, filesize=11.9 K 2024-11-23T15:24:30,438 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~194.56 KB/199230, heapSize ~510.47 KB/522720, currentSize=13.42 KB/13740 for d6bd711ee7b1117306956b276de6b58d in 173ms, sequenceid=205, compaction requested=true 2024-11-23T15:24:30,438 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d6bd711ee7b1117306956b276de6b58d: 2024-11-23T15:24:30,438 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d6bd711ee7b1117306956b276de6b58d:A, priority=-2147483648, current under compaction store size is 1 2024-11-23T15:24:30,438 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T15:24:30,438 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d6bd711ee7b1117306956b276de6b58d:B, priority=-2147483648, current under compaction store size is 2 2024-11-23T15:24:30,438 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T15:24:30,438 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T15:24:30,438 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T15:24:30,438 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d6bd711ee7b1117306956b276de6b58d:C, priority=-2147483648, current under compaction store size is 3 2024-11-23T15:24:30,438 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T15:24:30,440 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36795 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T15:24:30,440 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1540): d6bd711ee7b1117306956b276de6b58d/B is initiating minor compaction (all files) 2024-11-23T15:24:30,440 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d6bd711ee7b1117306956b276de6b58d/B in TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d. 
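
The repeated RegionTooBusyException entries above come from HRegion.checkResources rejecting mutations while the region's memstore sits above its blocking limit (logged here as 512.0 K); the limit is derived from the configured memstore flush size and hbase.hregion.memstore.block.multiplier, and the pressure clears once the flush logged above completes. The snippet below is a minimal client-side sketch of retrying such a rejected put. It assumes the exception actually surfaces to application code (for example with client retries turned down); in a default deployment the HBase client retries RegionTooBusyException internally. The class name, attempt count, and backoff values are illustrative assumptions; the row, family, and qualifier mirror the log (test_row_0, A:col10).

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionPutSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      // Row/family/qualifier mirror the log above (test_row_0, A:col10); the value is arbitrary.
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 100L;
      for (int attempt = 1; ; attempt++) {
        try {
          table.put(put);
          break;                                 // write accepted
        } catch (RegionTooBusyException e) {     // memstore above its blocking limit
          if (attempt >= 5) {
            throw e;                             // give up after a few attempts (illustrative cap)
          }
          Thread.sleep(backoffMs);
          backoffMs *= 2;                        // simple exponential backoff while the flush catches up
        }
      }
    }
  }
}
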
2024-11-23T15:24:30,440 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36795 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T15:24:30,440 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/3d724afe56f545578d002449e9a74ee4, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/7f5fa19be31d4d078338a9db7780371a, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/bdcd6af87a774e3894b6d14b02cd1069] into tmpdir=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp, totalSize=35.9 K 2024-11-23T15:24:30,440 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HStore(1540): d6bd711ee7b1117306956b276de6b58d/A is initiating minor compaction (all files) 2024-11-23T15:24:30,441 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d6bd711ee7b1117306956b276de6b58d/A in TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d. 2024-11-23T15:24:30,441 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/5595114e44624010a6316ddc252afafd, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/e0749eb2f0754d45b428588ce75e7545, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/0f24632235464f9bad380ab1194c90c6] into tmpdir=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp, totalSize=35.9 K 2024-11-23T15:24:30,442 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting 3d724afe56f545578d002449e9a74ee4, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=162, earliestPutTs=1732375467782 2024-11-23T15:24:30,442 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5595114e44624010a6316ddc252afafd, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=162, earliestPutTs=1732375467782 2024-11-23T15:24:30,443 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting 7f5fa19be31d4d078338a9db7780371a, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=170, earliestPutTs=1732375468422 2024-11-23T15:24:30,443 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.Compactor(224): Compacting e0749eb2f0754d45b428588ce75e7545, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=170, earliestPutTs=1732375468422 2024-11-23T15:24:30,444 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] 
compactions.Compactor(224): Compacting bdcd6af87a774e3894b6d14b02cd1069, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=205, earliestPutTs=1732375469647 2024-11-23T15:24:30,444 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0f24632235464f9bad380ab1194c90c6, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=205, earliestPutTs=1732375469647 2024-11-23T15:24:30,464 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d6bd711ee7b1117306956b276de6b58d#A#compaction#39 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T15:24:30,465 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/A/cb3479e8a05b4f8ba315e9d17f9728ab is 50, key is test_row_0/A:col10/1732375470265/Put/seqid=0 2024-11-23T15:24:30,472 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d6bd711ee7b1117306956b276de6b58d#B#compaction#40 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T15:24:30,473 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/B/235d6ee79118430ebc1d437d12869347 is 50, key is test_row_0/B:col10/1732375470265/Put/seqid=0 2024-11-23T15:24:30,500 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073741878_1054 (size=12595) 2024-11-23T15:24:30,510 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073741879_1055 (size=12595) 2024-11-23T15:24:30,516 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/A/cb3479e8a05b4f8ba315e9d17f9728ab as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/cb3479e8a05b4f8ba315e9d17f9728ab 2024-11-23T15:24:30,524 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/B/235d6ee79118430ebc1d437d12869347 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/235d6ee79118430ebc1d437d12869347 2024-11-23T15:24:30,530 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d6bd711ee7b1117306956b276de6b58d/A of d6bd711ee7b1117306956b276de6b58d into cb3479e8a05b4f8ba315e9d17f9728ab(size=12.3 K), total size for store is 12.3 K. 
This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T15:24:30,531 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d6bd711ee7b1117306956b276de6b58d: 2024-11-23T15:24:30,531 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d., storeName=d6bd711ee7b1117306956b276de6b58d/A, priority=13, startTime=1732375470438; duration=0sec 2024-11-23T15:24:30,531 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T15:24:30,533 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d6bd711ee7b1117306956b276de6b58d:A 2024-11-23T15:24:30,533 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T15:24:30,535 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d6bd711ee7b1117306956b276de6b58d/B of d6bd711ee7b1117306956b276de6b58d into 235d6ee79118430ebc1d437d12869347(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T15:24:30,535 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d6bd711ee7b1117306956b276de6b58d: 2024-11-23T15:24:30,535 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d., storeName=d6bd711ee7b1117306956b276de6b58d/B, priority=13, startTime=1732375470438; duration=0sec 2024-11-23T15:24:30,535 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36795 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T15:24:30,535 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T15:24:30,535 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d6bd711ee7b1117306956b276de6b58d:B 2024-11-23T15:24:30,535 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HStore(1540): d6bd711ee7b1117306956b276de6b58d/C is initiating minor compaction (all files) 2024-11-23T15:24:30,535 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d6bd711ee7b1117306956b276de6b58d/C in TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d. 
2024-11-23T15:24:30,535 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/0581a228a8844d548d43d6fb97c66410, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/feac5bd9c6dc439dba5b85e8aba2d3ac, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/6d88af0660934234acf464d8ef56bd2b] into tmpdir=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp, totalSize=35.9 K 2024-11-23T15:24:30,536 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0581a228a8844d548d43d6fb97c66410, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=162, earliestPutTs=1732375467782 2024-11-23T15:24:30,537 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.Compactor(224): Compacting feac5bd9c6dc439dba5b85e8aba2d3ac, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=170, earliestPutTs=1732375468422 2024-11-23T15:24:30,538 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6d88af0660934234acf464d8ef56bd2b, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=205, earliestPutTs=1732375469647 2024-11-23T15:24:30,551 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d6bd711ee7b1117306956b276de6b58d#C#compaction#41 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T15:24:30,552 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/C/8e7ea57107b94c2bad3861306eb906e6 is 50, key is test_row_0/C:col10/1732375470265/Put/seqid=0 2024-11-23T15:24:30,563 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073741880_1056 (size=12595) 2024-11-23T15:24:30,574 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/C/8e7ea57107b94c2bad3861306eb906e6 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/8e7ea57107b94c2bad3861306eb906e6 2024-11-23T15:24:30,586 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d6bd711ee7b1117306956b276de6b58d/C of d6bd711ee7b1117306956b276de6b58d into 8e7ea57107b94c2bad3861306eb906e6(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
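
The SortedCompactionPolicy/ExploringCompactionPolicy lines above show each store (A, B, then C) selecting all three of its eligible HFiles, roughly 12 K each for about 35.9 K total, for a minor compaction. The sketch below illustrates the size-ratio criterion such policies apply, where a candidate file is acceptable only if it is not much larger than the other candidates combined (the ratio is typically 1.2). This is a simplified illustration, not the actual ExploringCompactionPolicy code, and the per-file byte counts are approximations taken from the logged sizes.

import java.util.List;

public class CompactionRatioSketch {
  // Simplified ratio test in the spirit of HBase's size-based compaction policies:
  // a candidate file qualifies if it is no larger than the combined size of the
  // other candidates multiplied by the compaction ratio.
  static boolean withinRatio(List<Long> fileSizes, double ratio) {
    long total = fileSizes.stream().mapToLong(Long::longValue).sum();
    for (long size : fileSizes) {
      if (size > (total - size) * ratio) {
        return false;
      }
    }
    return true;
  }

  public static void main(String[] args) {
    // The three selected store files total 36795 bytes (~35.9 K), as in the log above;
    // the split into individual sizes is an approximation.
    List<Long> sizes = List.of(12_493L, 12_151L, 12_151L);
    System.out.println(withinRatio(sizes, 1.2));  // prints true: all three files qualify
  }
}
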
2024-11-23T15:24:30,586 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d6bd711ee7b1117306956b276de6b58d: 2024-11-23T15:24:30,586 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d., storeName=d6bd711ee7b1117306956b276de6b58d/C, priority=13, startTime=1732375470438; duration=0sec 2024-11-23T15:24:30,586 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T15:24:30,586 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d6bd711ee7b1117306956b276de6b58d:C 2024-11-23T15:24:30,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(8581): Flush requested on d6bd711ee7b1117306956b276de6b58d 2024-11-23T15:24:30,594 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d6bd711ee7b1117306956b276de6b58d 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-23T15:24:30,595 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d6bd711ee7b1117306956b276de6b58d, store=A 2024-11-23T15:24:30,595 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:24:30,595 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d6bd711ee7b1117306956b276de6b58d, store=B 2024-11-23T15:24:30,595 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:24:30,595 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d6bd711ee7b1117306956b276de6b58d, store=C 2024-11-23T15:24:30,595 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:24:30,602 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/A/f45bccbe5d074cf7bcb8dd8aeb5a8b6e is 50, key is test_row_0/A:col10/1732375470591/Put/seqid=0 2024-11-23T15:24:30,624 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:30,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59172 deadline: 1732375530618, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:30,624 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:30,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59158 deadline: 1732375530618, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:30,625 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:30,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59196 deadline: 1732375530621, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:30,625 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:30,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59132 deadline: 1732375530622, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:30,626 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:30,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59180 deadline: 1732375530623, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:30,636 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073741881_1057 (size=12151) 2024-11-23T15:24:30,641 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=221 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/A/f45bccbe5d074cf7bcb8dd8aeb5a8b6e 2024-11-23T15:24:30,655 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/B/3dac953d3143491a963460ea4499c8aa is 50, key is test_row_0/B:col10/1732375470591/Put/seqid=0 2024-11-23T15:24:30,669 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073741882_1058 (size=12151) 2024-11-23T15:24:30,670 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=221 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/B/3dac953d3143491a963460ea4499c8aa 2024-11-23T15:24:30,685 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/C/35e179b2a7ce496d9eaf33f5fef4b704 is 50, key is test_row_0/C:col10/1732375470591/Put/seqid=0 2024-11-23T15:24:30,735 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073741883_1059 (size=12151) 2024-11-23T15:24:30,736 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:30,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59158 deadline: 1732375530733, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:30,736 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:30,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59196 deadline: 1732375530733, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:30,737 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:30,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59180 deadline: 1732375530733, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:30,738 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:30,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59172 deadline: 1732375530734, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:30,738 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:30,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59132 deadline: 1732375530734, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:30,942 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:30,942 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:30,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59196 deadline: 1732375530939, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:30,943 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:30,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59158 deadline: 1732375530939, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:30,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59172 deadline: 1732375530939, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:30,943 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:30,943 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:30,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59180 deadline: 1732375530940, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:30,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59132 deadline: 1732375530940, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:30,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-11-23T15:24:30,944 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 18 completed 2024-11-23T15:24:30,946 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-23T15:24:30,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] procedure2.ProcedureExecutor(1098): Stored pid=20, state=RUNNABLE:FLUSH_TABLE_PREPARE; 
org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees 2024-11-23T15:24:30,949 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=20, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-23T15:24:30,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-11-23T15:24:30,950 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=20, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-23T15:24:30,950 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=21, ppid=20, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-23T15:24:31,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-11-23T15:24:31,102 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:24:31,103 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-11-23T15:24:31,103 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d. 2024-11-23T15:24:31,104 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d. as already flushing 2024-11-23T15:24:31,104 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d. 2024-11-23T15:24:31,104 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] handler.RSProcedureHandler(58): pid=21 java.io.IOException: Unable to complete flush {ENCODED => d6bd711ee7b1117306956b276de6b58d, NAME => 'TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T15:24:31,104 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=21 java.io.IOException: Unable to complete flush {ENCODED => d6bd711ee7b1117306956b276de6b58d, NAME => 'TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:24:31,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=21 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d6bd711ee7b1117306956b276de6b58d, NAME => 'TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d6bd711ee7b1117306956b276de6b58d, NAME => 'TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:24:31,137 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=221 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/C/35e179b2a7ce496d9eaf33f5fef4b704 2024-11-23T15:24:31,145 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/A/f45bccbe5d074cf7bcb8dd8aeb5a8b6e as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/f45bccbe5d074cf7bcb8dd8aeb5a8b6e 2024-11-23T15:24:31,152 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/f45bccbe5d074cf7bcb8dd8aeb5a8b6e, entries=150, sequenceid=221, filesize=11.9 K 2024-11-23T15:24:31,153 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/B/3dac953d3143491a963460ea4499c8aa as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/3dac953d3143491a963460ea4499c8aa 2024-11-23T15:24:31,161 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/3dac953d3143491a963460ea4499c8aa, entries=150, sequenceid=221, filesize=11.9 K 2024-11-23T15:24:31,162 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/C/35e179b2a7ce496d9eaf33f5fef4b704 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/35e179b2a7ce496d9eaf33f5fef4b704 2024-11-23T15:24:31,169 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/35e179b2a7ce496d9eaf33f5fef4b704, entries=150, sequenceid=221, filesize=11.9 K 2024-11-23T15:24:31,170 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for d6bd711ee7b1117306956b276de6b58d in 577ms, sequenceid=221, compaction requested=false 2024-11-23T15:24:31,170 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d6bd711ee7b1117306956b276de6b58d: 2024-11-23T15:24:31,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(8581): Flush requested on 
d6bd711ee7b1117306956b276de6b58d 2024-11-23T15:24:31,249 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d6bd711ee7b1117306956b276de6b58d 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-23T15:24:31,249 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d6bd711ee7b1117306956b276de6b58d, store=A 2024-11-23T15:24:31,250 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:24:31,250 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d6bd711ee7b1117306956b276de6b58d, store=B 2024-11-23T15:24:31,250 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:24:31,250 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d6bd711ee7b1117306956b276de6b58d, store=C 2024-11-23T15:24:31,250 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:24:31,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-11-23T15:24:31,257 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/A/d1457bc94fbb48b48d6d568d3f73a6ed is 50, key is test_row_0/A:col10/1732375471247/Put/seqid=0 2024-11-23T15:24:31,257 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:24:31,258 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-11-23T15:24:31,258 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d. 2024-11-23T15:24:31,258 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d. as already flushing 2024-11-23T15:24:31,258 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d. 2024-11-23T15:24:31,258 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] handler.RSProcedureHandler(58): pid=21 java.io.IOException: Unable to complete flush {ENCODED => d6bd711ee7b1117306956b276de6b58d, NAME => 'TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:24:31,259 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=21 java.io.IOException: Unable to complete flush {ENCODED => d6bd711ee7b1117306956b276de6b58d, NAME => 'TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:24:31,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=21 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d6bd711ee7b1117306956b276de6b58d, NAME => 'TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d6bd711ee7b1117306956b276de6b58d, NAME => 'TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:24:31,262 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:31,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59132 deadline: 1732375531260, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:31,263 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:31,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59172 deadline: 1732375531262, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:31,265 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:31,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59196 deadline: 1732375531263, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:31,266 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:31,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59158 deadline: 1732375531263, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:31,267 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:31,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59180 deadline: 1732375531264, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:31,278 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073741884_1060 (size=12151) 2024-11-23T15:24:31,283 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=246 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/A/d1457bc94fbb48b48d6d568d3f73a6ed 2024-11-23T15:24:31,299 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/B/f44783d19a104490b1a9a0a4a1c56624 is 50, key is test_row_0/B:col10/1732375471247/Put/seqid=0 2024-11-23T15:24:31,315 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073741885_1061 (size=12151) 2024-11-23T15:24:31,316 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=246 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/B/f44783d19a104490b1a9a0a4a1c56624 2024-11-23T15:24:31,336 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/C/7df6696c55dd4805b2b611302e09d949 is 50, key is test_row_0/C:col10/1732375471247/Put/seqid=0 2024-11-23T15:24:31,350 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073741886_1062 (size=12151) 2024-11-23T15:24:31,352 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=246 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/C/7df6696c55dd4805b2b611302e09d949 2024-11-23T15:24:31,361 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/A/d1457bc94fbb48b48d6d568d3f73a6ed as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/d1457bc94fbb48b48d6d568d3f73a6ed 2024-11-23T15:24:31,366 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:31,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59132 deadline: 1732375531365, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:31,366 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:31,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59172 deadline: 1732375531366, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:31,369 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:31,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59196 deadline: 1732375531367, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:31,370 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:31,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59158 deadline: 1732375531368, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:31,371 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:31,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59180 deadline: 1732375531369, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:31,376 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/d1457bc94fbb48b48d6d568d3f73a6ed, entries=150, sequenceid=246, filesize=11.9 K 2024-11-23T15:24:31,381 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/B/f44783d19a104490b1a9a0a4a1c56624 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/f44783d19a104490b1a9a0a4a1c56624 2024-11-23T15:24:31,389 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/f44783d19a104490b1a9a0a4a1c56624, entries=150, sequenceid=246, filesize=11.9 K 2024-11-23T15:24:31,391 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/C/7df6696c55dd4805b2b611302e09d949 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/7df6696c55dd4805b2b611302e09d949 2024-11-23T15:24:31,402 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/7df6696c55dd4805b2b611302e09d949, entries=150, sequenceid=246, filesize=11.9 K 2024-11-23T15:24:31,404 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=60.38 KB/61830 for d6bd711ee7b1117306956b276de6b58d in 155ms, sequenceid=246, compaction requested=true 2024-11-23T15:24:31,404 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d6bd711ee7b1117306956b276de6b58d: 2024-11-23T15:24:31,404 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d6bd711ee7b1117306956b276de6b58d:A, priority=-2147483648, current under compaction store size is 1 2024-11-23T15:24:31,404 
DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T15:24:31,404 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T15:24:31,405 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d6bd711ee7b1117306956b276de6b58d:B, priority=-2147483648, current under compaction store size is 2 2024-11-23T15:24:31,405 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T15:24:31,405 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d6bd711ee7b1117306956b276de6b58d:C, priority=-2147483648, current under compaction store size is 3 2024-11-23T15:24:31,405 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T15:24:31,406 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T15:24:31,406 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36897 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T15:24:31,406 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HStore(1540): d6bd711ee7b1117306956b276de6b58d/A is initiating minor compaction (all files) 2024-11-23T15:24:31,407 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d6bd711ee7b1117306956b276de6b58d/A in TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d. 
2024-11-23T15:24:31,407 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36897 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T15:24:31,407 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1540): d6bd711ee7b1117306956b276de6b58d/B is initiating minor compaction (all files) 2024-11-23T15:24:31,407 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/cb3479e8a05b4f8ba315e9d17f9728ab, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/f45bccbe5d074cf7bcb8dd8aeb5a8b6e, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/d1457bc94fbb48b48d6d568d3f73a6ed] into tmpdir=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp, totalSize=36.0 K 2024-11-23T15:24:31,407 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d6bd711ee7b1117306956b276de6b58d/B in TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d. 2024-11-23T15:24:31,407 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/235d6ee79118430ebc1d437d12869347, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/3dac953d3143491a963460ea4499c8aa, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/f44783d19a104490b1a9a0a4a1c56624] into tmpdir=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp, totalSize=36.0 K 2024-11-23T15:24:31,408 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.Compactor(224): Compacting cb3479e8a05b4f8ba315e9d17f9728ab, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=205, earliestPutTs=1732375469647 2024-11-23T15:24:31,408 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting 235d6ee79118430ebc1d437d12869347, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=205, earliestPutTs=1732375469647 2024-11-23T15:24:31,408 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting 3dac953d3143491a963460ea4499c8aa, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=221, earliestPutTs=1732375470591 2024-11-23T15:24:31,408 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.Compactor(224): Compacting f45bccbe5d074cf7bcb8dd8aeb5a8b6e, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=221, earliestPutTs=1732375470591 2024-11-23T15:24:31,409 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] 
compactions.Compactor(224): Compacting f44783d19a104490b1a9a0a4a1c56624, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=246, earliestPutTs=1732375470621 2024-11-23T15:24:31,409 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.Compactor(224): Compacting d1457bc94fbb48b48d6d568d3f73a6ed, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=246, earliestPutTs=1732375470621 2024-11-23T15:24:31,414 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:24:31,414 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-11-23T15:24:31,414 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d. 2024-11-23T15:24:31,415 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2837): Flushing d6bd711ee7b1117306956b276de6b58d 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-23T15:24:31,415 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d6bd711ee7b1117306956b276de6b58d, store=A 2024-11-23T15:24:31,415 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:24:31,415 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d6bd711ee7b1117306956b276de6b58d, store=B 2024-11-23T15:24:31,415 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:24:31,415 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d6bd711ee7b1117306956b276de6b58d, store=C 2024-11-23T15:24:31,415 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:24:31,434 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/A/ded405d2a4214442988161c923a3e570 is 50, key is test_row_0/A:col10/1732375471260/Put/seqid=0 2024-11-23T15:24:31,444 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d6bd711ee7b1117306956b276de6b58d#A#compaction#49 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-23T15:24:31,444 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d6bd711ee7b1117306956b276de6b58d#B#compaction#50 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T15:24:31,445 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/B/e398089425714411a5446fc7b8ba951b is 50, key is test_row_0/B:col10/1732375471247/Put/seqid=0 2024-11-23T15:24:31,445 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/A/5f008df8e1ed4e6c800e2cfe09601c0f is 50, key is test_row_0/A:col10/1732375471247/Put/seqid=0 2024-11-23T15:24:31,466 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073741887_1063 (size=12201) 2024-11-23T15:24:31,475 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073741888_1064 (size=12697) 2024-11-23T15:24:31,486 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073741889_1065 (size=12697) 2024-11-23T15:24:31,498 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/B/e398089425714411a5446fc7b8ba951b as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/e398089425714411a5446fc7b8ba951b 2024-11-23T15:24:31,511 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d6bd711ee7b1117306956b276de6b58d/B of d6bd711ee7b1117306956b276de6b58d into e398089425714411a5446fc7b8ba951b(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T15:24:31,511 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d6bd711ee7b1117306956b276de6b58d: 2024-11-23T15:24:31,511 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d., storeName=d6bd711ee7b1117306956b276de6b58d/B, priority=13, startTime=1732375471404; duration=0sec 2024-11-23T15:24:31,511 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T15:24:31,511 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d6bd711ee7b1117306956b276de6b58d:B 2024-11-23T15:24:31,512 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T15:24:31,515 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36897 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T15:24:31,515 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1540): d6bd711ee7b1117306956b276de6b58d/C is initiating minor compaction (all files) 2024-11-23T15:24:31,515 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d6bd711ee7b1117306956b276de6b58d/C in TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d. 2024-11-23T15:24:31,516 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/8e7ea57107b94c2bad3861306eb906e6, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/35e179b2a7ce496d9eaf33f5fef4b704, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/7df6696c55dd4805b2b611302e09d949] into tmpdir=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp, totalSize=36.0 K 2024-11-23T15:24:31,517 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting 8e7ea57107b94c2bad3861306eb906e6, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=205, earliestPutTs=1732375469647 2024-11-23T15:24:31,526 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting 35e179b2a7ce496d9eaf33f5fef4b704, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=221, earliestPutTs=1732375470591 2024-11-23T15:24:31,527 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting 7df6696c55dd4805b2b611302e09d949, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=246, earliestPutTs=1732375470621 2024-11-23T15:24:31,540 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
d6bd711ee7b1117306956b276de6b58d#C#compaction#51 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T15:24:31,541 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/C/17bbb7237a6b4ae68d1d210665708eb3 is 50, key is test_row_0/C:col10/1732375471247/Put/seqid=0 2024-11-23T15:24:31,551 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073741890_1066 (size=12697) 2024-11-23T15:24:31,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-11-23T15:24:31,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(8581): Flush requested on d6bd711ee7b1117306956b276de6b58d 2024-11-23T15:24:31,572 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d. as already flushing 2024-11-23T15:24:31,601 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:31,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59196 deadline: 1732375531597, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:31,602 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:31,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59158 deadline: 1732375531600, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:31,603 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:31,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59132 deadline: 1732375531600, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:31,606 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:31,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59172 deadline: 1732375531601, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:31,606 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:31,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59180 deadline: 1732375531602, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:31,704 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:31,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59196 deadline: 1732375531703, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:31,705 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:31,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59158 deadline: 1732375531704, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:31,705 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:31,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59132 deadline: 1732375531704, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:31,709 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:31,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59172 deadline: 1732375531707, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:31,709 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:31,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59180 deadline: 1732375531708, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:31,867 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=258 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/A/ded405d2a4214442988161c923a3e570 2024-11-23T15:24:31,887 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/A/5f008df8e1ed4e6c800e2cfe09601c0f as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/5f008df8e1ed4e6c800e2cfe09601c0f 2024-11-23T15:24:31,892 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/B/7209624e503e44279fb5fef3198d40ea is 50, key is test_row_0/B:col10/1732375471260/Put/seqid=0 2024-11-23T15:24:31,900 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d6bd711ee7b1117306956b276de6b58d/A of d6bd711ee7b1117306956b276de6b58d into 5f008df8e1ed4e6c800e2cfe09601c0f(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
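The repeated RegionTooBusyException warnings in the surrounding records come from HRegion.checkResources rejecting mutations once the region's memstore is over its 512.0 K blocking limit; writers are expected to back off until the in-flight flush drains the memstore. The HBase client normally performs this retrying itself; the Java sketch below only makes the backoff explicit for illustration. The table, row, and column names mirror the test rows above, while the class name, cell value, retry count, and backoff numbers are assumptions.

// Illustrative sketch, not taken from the test: an explicit retry loop around
// a single Put for the case where the server rejects it because the memstore
// is over its blocking limit, as in the surrounding records. The HBase client
// already retries such calls internally; values below are assumptions.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.RetriesExhaustedException;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class PutWithBackoff {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 100L; // assumed starting backoff
      for (int attempt = 1; attempt <= 5; attempt++) {
        try {
          table.put(put);
          break; // write accepted
        } catch (RegionTooBusyException | RetriesExhaustedException e) {
          // Memstore above its blocking limit; wait for the flush to drain it.
          Thread.sleep(backoffMs);
          backoffMs *= 2;
        }
      }
    }
  }
}

Depending on the client's own retry settings, the busy signal may reach application code directly as a RegionTooBusyException or wrapped in a RetriesExhaustedException, which is why both are caught above.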
2024-11-23T15:24:31,900 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d6bd711ee7b1117306956b276de6b58d: 2024-11-23T15:24:31,901 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d., storeName=d6bd711ee7b1117306956b276de6b58d/A, priority=13, startTime=1732375471404; duration=0sec 2024-11-23T15:24:31,901 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T15:24:31,901 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d6bd711ee7b1117306956b276de6b58d:A 2024-11-23T15:24:31,909 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:31,909 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:31,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59158 deadline: 1732375531908, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:31,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59196 deadline: 1732375531907, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:31,910 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:31,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59132 deadline: 1732375531909, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:31,913 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:31,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59172 deadline: 1732375531912, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:31,913 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:31,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59180 deadline: 1732375531912, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:31,921 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073741891_1067 (size=12201) 2024-11-23T15:24:31,923 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=258 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/B/7209624e503e44279fb5fef3198d40ea 2024-11-23T15:24:31,936 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/C/4bdc19660d2f4f4a8cef5aa27e6a0509 is 50, key is test_row_0/C:col10/1732375471260/Put/seqid=0 2024-11-23T15:24:31,962 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073741892_1068 (size=12201) 2024-11-23T15:24:31,964 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=258 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/C/4bdc19660d2f4f4a8cef5aa27e6a0509 2024-11-23T15:24:31,968 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/C/17bbb7237a6b4ae68d1d210665708eb3 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/17bbb7237a6b4ae68d1d210665708eb3 2024-11-23T15:24:31,979 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/A/ded405d2a4214442988161c923a3e570 as 
hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/ded405d2a4214442988161c923a3e570 2024-11-23T15:24:31,981 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d6bd711ee7b1117306956b276de6b58d/C of d6bd711ee7b1117306956b276de6b58d into 17bbb7237a6b4ae68d1d210665708eb3(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T15:24:31,981 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d6bd711ee7b1117306956b276de6b58d: 2024-11-23T15:24:31,981 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d., storeName=d6bd711ee7b1117306956b276de6b58d/C, priority=13, startTime=1732375471405; duration=0sec 2024-11-23T15:24:31,981 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T15:24:31,981 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d6bd711ee7b1117306956b276de6b58d:C 2024-11-23T15:24:31,988 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/ded405d2a4214442988161c923a3e570, entries=150, sequenceid=258, filesize=11.9 K 2024-11-23T15:24:31,990 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/B/7209624e503e44279fb5fef3198d40ea as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/7209624e503e44279fb5fef3198d40ea 2024-11-23T15:24:31,997 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/7209624e503e44279fb5fef3198d40ea, entries=150, sequenceid=258, filesize=11.9 K 2024-11-23T15:24:32,000 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/C/4bdc19660d2f4f4a8cef5aa27e6a0509 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/4bdc19660d2f4f4a8cef5aa27e6a0509 2024-11-23T15:24:32,011 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/4bdc19660d2f4f4a8cef5aa27e6a0509, entries=150, sequenceid=258, filesize=11.9 K 2024-11-23T15:24:32,014 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for d6bd711ee7b1117306956b276de6b58d in 600ms, sequenceid=258, compaction requested=false 2024-11-23T15:24:32,014 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2538): Flush status journal for d6bd711ee7b1117306956b276de6b58d: 2024-11-23T15:24:32,014 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d. 2024-11-23T15:24:32,014 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=21 2024-11-23T15:24:32,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4106): Remote procedure done, pid=21 2024-11-23T15:24:32,020 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=21, resume processing ppid=20 2024-11-23T15:24:32,020 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=21, ppid=20, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.0670 sec 2024-11-23T15:24:32,022 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=20, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees in 1.0740 sec 2024-11-23T15:24:32,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-11-23T15:24:32,054 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 20 completed 2024-11-23T15:24:32,057 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-23T15:24:32,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] procedure2.ProcedureExecutor(1098): Stored pid=22, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees 2024-11-23T15:24:32,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-11-23T15:24:32,059 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=22, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-23T15:24:32,060 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=22, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-23T15:24:32,060 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=23, ppid=22, 
state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-23T15:24:32,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-11-23T15:24:32,213 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:24:32,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(8581): Flush requested on d6bd711ee7b1117306956b276de6b58d 2024-11-23T15:24:32,213 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d6bd711ee7b1117306956b276de6b58d 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-23T15:24:32,214 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-11-23T15:24:32,214 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d. 2024-11-23T15:24:32,214 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d. as already flushing 2024-11-23T15:24:32,214 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d. 2024-11-23T15:24:32,214 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] handler.RSProcedureHandler(58): pid=23 java.io.IOException: Unable to complete flush {ENCODED => d6bd711ee7b1117306956b276de6b58d, NAME => 'TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:24:32,215 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=23 java.io.IOException: Unable to complete flush {ENCODED => d6bd711ee7b1117306956b276de6b58d, NAME => 'TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:24:32,215 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d6bd711ee7b1117306956b276de6b58d, store=A 2024-11-23T15:24:32,215 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:24:32,215 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d6bd711ee7b1117306956b276de6b58d, store=B 2024-11-23T15:24:32,216 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:24:32,216 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d6bd711ee7b1117306956b276de6b58d, store=C 2024-11-23T15:24:32,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=23 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d6bd711ee7b1117306956b276de6b58d, NAME => 'TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d6bd711ee7b1117306956b276de6b58d, NAME => 'TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:24:32,216 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:24:32,224 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/A/4a4c069a6a2c425c8b3751d4ed88e556 is 50, key is test_row_0/A:col10/1732375472212/Put/seqid=0 2024-11-23T15:24:32,228 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:32,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59158 deadline: 1732375532222, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:32,229 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:32,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59180 deadline: 1732375532225, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:32,230 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:32,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59172 deadline: 1732375532226, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:32,230 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:32,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59132 deadline: 1732375532228, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:32,231 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:32,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59196 deadline: 1732375532228, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:32,234 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073741893_1069 (size=12301) 2024-11-23T15:24:32,236 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=287 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/A/4a4c069a6a2c425c8b3751d4ed88e556 2024-11-23T15:24:32,250 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/B/f1a99b6c1a274279ab49d3b63c4435a8 is 50, key is test_row_0/B:col10/1732375472212/Put/seqid=0 2024-11-23T15:24:32,257 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073741894_1070 (size=12301) 2024-11-23T15:24:32,331 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:32,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59158 deadline: 1732375532330, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:32,332 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:32,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59180 deadline: 1732375532330, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:32,332 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:32,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59172 deadline: 1732375532331, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:32,336 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:32,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59196 deadline: 1732375532332, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:32,336 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:32,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59132 deadline: 1732375532333, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:32,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-11-23T15:24:32,369 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:24:32,370 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-11-23T15:24:32,370 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d. 2024-11-23T15:24:32,370 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d. as already flushing 2024-11-23T15:24:32,370 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d. 2024-11-23T15:24:32,370 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] handler.RSProcedureHandler(58): pid=23 java.io.IOException: Unable to complete flush {ENCODED => d6bd711ee7b1117306956b276de6b58d, NAME => 'TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:24:32,371 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=23 java.io.IOException: Unable to complete flush {ENCODED => d6bd711ee7b1117306956b276de6b58d, NAME => 'TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:24:32,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=23 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d6bd711ee7b1117306956b276de6b58d, NAME => 'TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d6bd711ee7b1117306956b276de6b58d, NAME => 'TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:24:32,526 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:24:32,527 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-11-23T15:24:32,527 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d. 2024-11-23T15:24:32,527 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d. as already flushing 2024-11-23T15:24:32,527 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d. 2024-11-23T15:24:32,527 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] handler.RSProcedureHandler(58): pid=23 java.io.IOException: Unable to complete flush {ENCODED => d6bd711ee7b1117306956b276de6b58d, NAME => 'TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:24:32,527 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=23 java.io.IOException: Unable to complete flush {ENCODED => d6bd711ee7b1117306956b276de6b58d, NAME => 'TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:24:32,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=23 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d6bd711ee7b1117306956b276de6b58d, NAME => 'TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d6bd711ee7b1117306956b276de6b58d, NAME => 'TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:24:32,536 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:32,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59158 deadline: 1732375532534, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:32,535 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:32,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59180 deadline: 1732375532534, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:32,536 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:32,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59172 deadline: 1732375532536, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:32,538 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:32,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59132 deadline: 1732375532537, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:32,543 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:32,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59196 deadline: 1732375532540, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:32,658 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=287 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/B/f1a99b6c1a274279ab49d3b63c4435a8 2024-11-23T15:24:32,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-11-23T15:24:32,670 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/C/4533e4c1829b45c996319d7c13a2d35e is 50, key is test_row_0/C:col10/1732375472212/Put/seqid=0 2024-11-23T15:24:32,675 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073741895_1071 (size=12301) 2024-11-23T15:24:32,680 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:24:32,680 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-11-23T15:24:32,687 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d. 
2024-11-23T15:24:32,687 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d. as already flushing 2024-11-23T15:24:32,687 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d. 2024-11-23T15:24:32,687 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] handler.RSProcedureHandler(58): pid=23 java.io.IOException: Unable to complete flush {ENCODED => d6bd711ee7b1117306956b276de6b58d, NAME => 'TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:24:32,687 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=23 java.io.IOException: Unable to complete flush {ENCODED => d6bd711ee7b1117306956b276de6b58d, NAME => 'TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T15:24:32,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=23 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d6bd711ee7b1117306956b276de6b58d, NAME => 'TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d6bd711ee7b1117306956b276de6b58d, NAME => 'TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:24:32,840 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:24:32,840 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:32,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59132 deadline: 1732375532839, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:32,840 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:32,841 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-11-23T15:24:32,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59180 deadline: 1732375532838, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:32,841 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:32,841 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d. 2024-11-23T15:24:32,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59158 deadline: 1732375532839, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:32,841 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d. as already flushing 2024-11-23T15:24:32,841 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d. 2024-11-23T15:24:32,841 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] handler.RSProcedureHandler(58): pid=23 java.io.IOException: Unable to complete flush {ENCODED => d6bd711ee7b1117306956b276de6b58d, NAME => 'TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:24:32,841 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=23 java.io.IOException: Unable to complete flush {ENCODED => d6bd711ee7b1117306956b276de6b58d, NAME => 'TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:24:32,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=23 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d6bd711ee7b1117306956b276de6b58d, NAME => 'TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d6bd711ee7b1117306956b276de6b58d, NAME => 'TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:24:32,845 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:32,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59172 deadline: 1732375532843, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:32,848 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:32,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59196 deadline: 1732375532847, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:32,995 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:24:32,996 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-11-23T15:24:32,996 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d. 2024-11-23T15:24:32,996 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d. as already flushing 2024-11-23T15:24:32,996 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d. 2024-11-23T15:24:32,997 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] handler.RSProcedureHandler(58): pid=23 java.io.IOException: Unable to complete flush {ENCODED => d6bd711ee7b1117306956b276de6b58d, NAME => 'TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T15:24:32,997 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=23 java.io.IOException: Unable to complete flush {ENCODED => d6bd711ee7b1117306956b276de6b58d, NAME => 'TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:24:32,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=23 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d6bd711ee7b1117306956b276de6b58d, NAME => 'TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d6bd711ee7b1117306956b276de6b58d, NAME => 'TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:24:33,078 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=287 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/C/4533e4c1829b45c996319d7c13a2d35e 2024-11-23T15:24:33,085 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/A/4a4c069a6a2c425c8b3751d4ed88e556 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/4a4c069a6a2c425c8b3751d4ed88e556 2024-11-23T15:24:33,092 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/4a4c069a6a2c425c8b3751d4ed88e556, entries=150, sequenceid=287, filesize=12.0 K 2024-11-23T15:24:33,093 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/B/f1a99b6c1a274279ab49d3b63c4435a8 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/f1a99b6c1a274279ab49d3b63c4435a8 2024-11-23T15:24:33,099 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/f1a99b6c1a274279ab49d3b63c4435a8, entries=150, sequenceid=287, filesize=12.0 K 2024-11-23T15:24:33,100 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/C/4533e4c1829b45c996319d7c13a2d35e as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/4533e4c1829b45c996319d7c13a2d35e 2024-11-23T15:24:33,107 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/4533e4c1829b45c996319d7c13a2d35e, entries=150, sequenceid=287, filesize=12.0 K 2024-11-23T15:24:33,112 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for d6bd711ee7b1117306956b276de6b58d in 899ms, sequenceid=287, compaction requested=true 2024-11-23T15:24:33,112 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d6bd711ee7b1117306956b276de6b58d: 2024-11-23T15:24:33,112 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 
d6bd711ee7b1117306956b276de6b58d:A, priority=-2147483648, current under compaction store size is 1 2024-11-23T15:24:33,112 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T15:24:33,112 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T15:24:33,112 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d6bd711ee7b1117306956b276de6b58d:B, priority=-2147483648, current under compaction store size is 2 2024-11-23T15:24:33,112 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T15:24:33,112 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T15:24:33,112 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d6bd711ee7b1117306956b276de6b58d:C, priority=-2147483648, current under compaction store size is 3 2024-11-23T15:24:33,112 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T15:24:33,114 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37199 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T15:24:33,114 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HStore(1540): d6bd711ee7b1117306956b276de6b58d/A is initiating minor compaction (all files) 2024-11-23T15:24:33,114 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d6bd711ee7b1117306956b276de6b58d/A in TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d. 
2024-11-23T15:24:33,114 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/5f008df8e1ed4e6c800e2cfe09601c0f, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/ded405d2a4214442988161c923a3e570, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/4a4c069a6a2c425c8b3751d4ed88e556] into tmpdir=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp, totalSize=36.3 K 2024-11-23T15:24:33,115 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37199 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T15:24:33,115 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5f008df8e1ed4e6c800e2cfe09601c0f, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=246, earliestPutTs=1732375470621 2024-11-23T15:24:33,115 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1540): d6bd711ee7b1117306956b276de6b58d/B is initiating minor compaction (all files) 2024-11-23T15:24:33,115 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d6bd711ee7b1117306956b276de6b58d/B in TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d. 
2024-11-23T15:24:33,115 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/e398089425714411a5446fc7b8ba951b, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/7209624e503e44279fb5fef3198d40ea, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/f1a99b6c1a274279ab49d3b63c4435a8] into tmpdir=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp, totalSize=36.3 K 2024-11-23T15:24:33,115 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.Compactor(224): Compacting ded405d2a4214442988161c923a3e570, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=258, earliestPutTs=1732375471250 2024-11-23T15:24:33,115 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting e398089425714411a5446fc7b8ba951b, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=246, earliestPutTs=1732375470621 2024-11-23T15:24:33,116 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4a4c069a6a2c425c8b3751d4ed88e556, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=287, earliestPutTs=1732375471589 2024-11-23T15:24:33,116 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting 7209624e503e44279fb5fef3198d40ea, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=258, earliestPutTs=1732375471250 2024-11-23T15:24:33,117 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting f1a99b6c1a274279ab49d3b63c4435a8, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=287, earliestPutTs=1732375471589 2024-11-23T15:24:33,126 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d6bd711ee7b1117306956b276de6b58d#A#compaction#57 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T15:24:33,127 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/A/e80ab0ae06144da4ba9a402305f7775c is 50, key is test_row_0/A:col10/1732375472212/Put/seqid=0 2024-11-23T15:24:33,130 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d6bd711ee7b1117306956b276de6b58d#B#compaction#58 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T15:24:33,131 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/B/22e456cb42484fbea5f9a7082fc4171c is 50, key is test_row_0/B:col10/1732375472212/Put/seqid=0 2024-11-23T15:24:33,146 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073741896_1072 (size=12949) 2024-11-23T15:24:33,150 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:24:33,150 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-11-23T15:24:33,151 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d. 2024-11-23T15:24:33,151 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2837): Flushing d6bd711ee7b1117306956b276de6b58d 3/3 column families, dataSize=46.96 KB heapSize=123.80 KB 2024-11-23T15:24:33,151 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d6bd711ee7b1117306956b276de6b58d, store=A 2024-11-23T15:24:33,151 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:24:33,151 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d6bd711ee7b1117306956b276de6b58d, store=B 2024-11-23T15:24:33,151 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:24:33,151 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d6bd711ee7b1117306956b276de6b58d, store=C 2024-11-23T15:24:33,152 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:24:33,156 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073741897_1073 (size=12949) 2024-11-23T15:24:33,156 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/A/e80ab0ae06144da4ba9a402305f7775c as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/e80ab0ae06144da4ba9a402305f7775c 2024-11-23T15:24:33,158 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 
{event_type=RS_FLUSH_REGIONS, pid=23}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/A/379b1d66f4764d418273e5b7087deeef is 50, key is test_row_0/A:col10/1732375472226/Put/seqid=0 2024-11-23T15:24:33,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-11-23T15:24:33,169 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d6bd711ee7b1117306956b276de6b58d/A of d6bd711ee7b1117306956b276de6b58d into e80ab0ae06144da4ba9a402305f7775c(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T15:24:33,169 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d6bd711ee7b1117306956b276de6b58d: 2024-11-23T15:24:33,169 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d., storeName=d6bd711ee7b1117306956b276de6b58d/A, priority=13, startTime=1732375473112; duration=0sec 2024-11-23T15:24:33,169 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T15:24:33,169 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d6bd711ee7b1117306956b276de6b58d:A 2024-11-23T15:24:33,169 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T15:24:33,171 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/B/22e456cb42484fbea5f9a7082fc4171c as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/22e456cb42484fbea5f9a7082fc4171c 2024-11-23T15:24:33,172 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37199 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T15:24:33,172 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HStore(1540): d6bd711ee7b1117306956b276de6b58d/C is initiating minor compaction (all files) 2024-11-23T15:24:33,173 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d6bd711ee7b1117306956b276de6b58d/C in TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d. 
2024-11-23T15:24:33,173 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/17bbb7237a6b4ae68d1d210665708eb3, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/4bdc19660d2f4f4a8cef5aa27e6a0509, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/4533e4c1829b45c996319d7c13a2d35e] into tmpdir=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp, totalSize=36.3 K 2024-11-23T15:24:33,174 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 17bbb7237a6b4ae68d1d210665708eb3, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=246, earliestPutTs=1732375470621 2024-11-23T15:24:33,175 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4bdc19660d2f4f4a8cef5aa27e6a0509, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=258, earliestPutTs=1732375471250 2024-11-23T15:24:33,176 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4533e4c1829b45c996319d7c13a2d35e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=287, earliestPutTs=1732375471589 2024-11-23T15:24:33,179 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d6bd711ee7b1117306956b276de6b58d/B of d6bd711ee7b1117306956b276de6b58d into 22e456cb42484fbea5f9a7082fc4171c(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T15:24:33,179 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d6bd711ee7b1117306956b276de6b58d: 2024-11-23T15:24:33,179 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d., storeName=d6bd711ee7b1117306956b276de6b58d/B, priority=13, startTime=1732375473112; duration=0sec 2024-11-23T15:24:33,180 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T15:24:33,180 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d6bd711ee7b1117306956b276de6b58d:B 2024-11-23T15:24:33,182 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073741898_1074 (size=12301) 2024-11-23T15:24:33,188 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d6bd711ee7b1117306956b276de6b58d#C#compaction#60 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T15:24:33,188 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/C/57009fe9f4dd468ab2a5a77450d9d34d is 50, key is test_row_0/C:col10/1732375472212/Put/seqid=0 2024-11-23T15:24:33,195 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073741899_1075 (size=12949) 2024-11-23T15:24:33,205 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/C/57009fe9f4dd468ab2a5a77450d9d34d as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/57009fe9f4dd468ab2a5a77450d9d34d 2024-11-23T15:24:33,213 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d6bd711ee7b1117306956b276de6b58d/C of d6bd711ee7b1117306956b276de6b58d into 57009fe9f4dd468ab2a5a77450d9d34d(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T15:24:33,213 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d6bd711ee7b1117306956b276de6b58d: 2024-11-23T15:24:33,213 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d., storeName=d6bd711ee7b1117306956b276de6b58d/C, priority=13, startTime=1732375473112; duration=0sec 2024-11-23T15:24:33,214 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T15:24:33,214 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d6bd711ee7b1117306956b276de6b58d:C 2024-11-23T15:24:33,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(8581): Flush requested on d6bd711ee7b1117306956b276de6b58d 2024-11-23T15:24:33,348 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d. as already flushing 2024-11-23T15:24:33,376 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:33,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59132 deadline: 1732375533372, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:33,378 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:33,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59172 deadline: 1732375533374, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:33,379 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:33,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59180 deadline: 1732375533376, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:33,379 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:33,379 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:33,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59158 deadline: 1732375533377, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:33,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59196 deadline: 1732375533377, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:33,479 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:33,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59132 deadline: 1732375533478, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:33,482 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:33,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59172 deadline: 1732375533480, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:33,483 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:33,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59180 deadline: 1732375533480, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:33,483 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:33,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59158 deadline: 1732375533481, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:33,483 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:33,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59196 deadline: 1732375533481, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:33,583 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=297 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/A/379b1d66f4764d418273e5b7087deeef 2024-11-23T15:24:33,594 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/B/28d70fcecbfc48d2b32d856377bab704 is 50, key is test_row_0/B:col10/1732375472226/Put/seqid=0 2024-11-23T15:24:33,610 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073741900_1076 (size=12301) 2024-11-23T15:24:33,615 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=297 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/B/28d70fcecbfc48d2b32d856377bab704 2024-11-23T15:24:33,629 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/C/0ad7203d568c47f79c5ac10e5b13f869 is 50, key is 
test_row_0/C:col10/1732375472226/Put/seqid=0 2024-11-23T15:24:33,634 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073741901_1077 (size=12301) 2024-11-23T15:24:33,683 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:33,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59132 deadline: 1732375533681, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:33,684 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:33,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59180 deadline: 1732375533684, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:33,685 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:33,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59158 deadline: 1732375533685, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:33,686 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:33,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59196 deadline: 1732375533685, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:33,686 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:33,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59172 deadline: 1732375533686, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:33,987 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:33,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59132 deadline: 1732375533986, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:33,987 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:33,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59180 deadline: 1732375533986, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:33,990 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:33,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59158 deadline: 1732375533988, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:33,990 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:33,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59172 deadline: 1732375533989, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:33,991 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:33,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59196 deadline: 1732375533990, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:34,035 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=297 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/C/0ad7203d568c47f79c5ac10e5b13f869 2024-11-23T15:24:34,043 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/A/379b1d66f4764d418273e5b7087deeef as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/379b1d66f4764d418273e5b7087deeef 2024-11-23T15:24:34,049 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/379b1d66f4764d418273e5b7087deeef, entries=150, sequenceid=297, filesize=12.0 K 2024-11-23T15:24:34,051 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/B/28d70fcecbfc48d2b32d856377bab704 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/28d70fcecbfc48d2b32d856377bab704 2024-11-23T15:24:34,058 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/28d70fcecbfc48d2b32d856377bab704, entries=150, sequenceid=297, filesize=12.0 K 2024-11-23T15:24:34,060 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/C/0ad7203d568c47f79c5ac10e5b13f869 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/0ad7203d568c47f79c5ac10e5b13f869 2024-11-23T15:24:34,066 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/0ad7203d568c47f79c5ac10e5b13f869, entries=150, sequenceid=297, filesize=12.0 K 2024-11-23T15:24:34,067 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(3040): Finished flush of dataSize ~46.96 KB/48090, heapSize ~123.75 KB/126720, currentSize=161.02 KB/164880 for d6bd711ee7b1117306956b276de6b58d in 916ms, sequenceid=297, compaction requested=false 2024-11-23T15:24:34,067 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2538): Flush status journal for d6bd711ee7b1117306956b276de6b58d: 2024-11-23T15:24:34,067 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d. 2024-11-23T15:24:34,067 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=23 2024-11-23T15:24:34,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4106): Remote procedure done, pid=23 2024-11-23T15:24:34,071 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=23, resume processing ppid=22 2024-11-23T15:24:34,072 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=23, ppid=22, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.0090 sec 2024-11-23T15:24:34,074 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=22, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees in 2.0150 sec 2024-11-23T15:24:34,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-11-23T15:24:34,165 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 22 completed 2024-11-23T15:24:34,167 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-23T15:24:34,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] procedure2.ProcedureExecutor(1098): Stored pid=24, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=24, table=TestAcidGuarantees 2024-11-23T15:24:34,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-11-23T15:24:34,171 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=24, state=RUNNABLE:FLUSH_TABLE_PREPARE, 
locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=24, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-23T15:24:34,172 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=24, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=24, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-23T15:24:34,172 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=25, ppid=24, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-23T15:24:34,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-11-23T15:24:34,324 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:24:34,325 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-11-23T15:24:34,325 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d. 2024-11-23T15:24:34,326 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2837): Flushing d6bd711ee7b1117306956b276de6b58d 3/3 column families, dataSize=161.02 KB heapSize=422.63 KB 2024-11-23T15:24:34,326 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d6bd711ee7b1117306956b276de6b58d, store=A 2024-11-23T15:24:34,326 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:24:34,326 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d6bd711ee7b1117306956b276de6b58d, store=B 2024-11-23T15:24:34,326 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:24:34,326 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d6bd711ee7b1117306956b276de6b58d, store=C 2024-11-23T15:24:34,326 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:24:34,333 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/A/bbfd6d1a4dc7414d9332939e5a854da4 is 50, key is test_row_0/A:col10/1732375473375/Put/seqid=0 2024-11-23T15:24:34,337 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073741902_1078 (size=12301) 
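The WARN/DEBUG pairs above show handler threads rejecting Mutate RPCs with RegionTooBusyException because region d6bd711ee7b1117306956b276de6b58d has exceeded its 512.0 K memstore blocking limit while a flush is still in flight. A caller typically recovers by backing off and retrying once the flush drains the memstore. The following is a minimal, hypothetical Java sketch of such a retry loop: the table, row, and column names are taken from this test log, but the helper putWithBackoff and its retry parameters are illustrative assumptions rather than test code, and depending on client retry settings the exception may surface wrapped by the client's own retry machinery.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionRetryExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      // Row key and column taken from the log ("key is test_row_0/A:col10/...").
      Put put = new Put(Bytes.toBytes("test_row_0"))
          .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      putWithBackoff(table, put, 5, 200L);
    }
  }

  // Retry a single Put when the region reports it is too busy (memstore over its
  // blocking limit). The attempt count and backoff are arbitrary illustrative values.
  static void putWithBackoff(Table table, Put put, int maxAttempts, long backoffMs)
      throws IOException, InterruptedException {
    for (int attempt = 1; ; attempt++) {
      try {
        table.put(put);
        return;
      } catch (RegionTooBusyException e) {
        if (attempt >= maxAttempts) {
          throw e;
        }
        // Give the in-flight flush time to drain the memstore before retrying.
        Thread.sleep(backoffMs * attempt);
      }
    }
  }
}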
2024-11-23T15:24:34,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-11-23T15:24:34,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(8581): Flush requested on d6bd711ee7b1117306956b276de6b58d 2024-11-23T15:24:34,496 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d. as already flushing 2024-11-23T15:24:34,503 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:34,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59132 deadline: 1732375534501, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:34,504 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:34,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59180 deadline: 1732375534501, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:34,504 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:34,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59196 deadline: 1732375534502, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:34,504 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:34,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59172 deadline: 1732375534502, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:34,507 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:34,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59158 deadline: 1732375534504, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:34,607 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:34,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59132 deadline: 1732375534605, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:34,607 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:34,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59196 deadline: 1732375534606, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:34,607 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:34,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59180 deadline: 1732375534606, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:34,607 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:34,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59172 deadline: 1732375534606, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:34,609 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:34,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59158 deadline: 1732375534608, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:34,739 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=327 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/A/bbfd6d1a4dc7414d9332939e5a854da4 2024-11-23T15:24:34,749 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/B/efe2c3ee5afc4ee2b989e9e824a127af is 50, key is test_row_0/B:col10/1732375473375/Put/seqid=0 2024-11-23T15:24:34,753 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073741903_1079 (size=12301) 2024-11-23T15:24:34,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-11-23T15:24:34,810 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:34,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59180 deadline: 1732375534809, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:34,810 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:34,811 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:34,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59172 deadline: 1732375534809, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:34,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59132 deadline: 1732375534809, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:34,811 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:34,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59196 deadline: 1732375534809, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:34,812 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:34,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59158 deadline: 1732375534810, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:35,115 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:35,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59132 deadline: 1732375535113, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:35,115 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:35,115 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:35,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59158 deadline: 1732375535114, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:35,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59196 deadline: 1732375535114, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:35,116 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:35,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59172 deadline: 1732375535115, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:35,118 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:35,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59180 deadline: 1732375535117, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:35,154 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=327 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/B/efe2c3ee5afc4ee2b989e9e824a127af 2024-11-23T15:24:35,165 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/C/62ceb8703821403f9d822ee2a51f6181 is 50, key is test_row_0/C:col10/1732375473375/Put/seqid=0 2024-11-23T15:24:35,170 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073741904_1080 (size=12301) 2024-11-23T15:24:35,172 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=327 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/C/62ceb8703821403f9d822ee2a51f6181 2024-11-23T15:24:35,178 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/A/bbfd6d1a4dc7414d9332939e5a854da4 as 
hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/bbfd6d1a4dc7414d9332939e5a854da4 2024-11-23T15:24:35,183 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/bbfd6d1a4dc7414d9332939e5a854da4, entries=150, sequenceid=327, filesize=12.0 K 2024-11-23T15:24:35,184 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/B/efe2c3ee5afc4ee2b989e9e824a127af as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/efe2c3ee5afc4ee2b989e9e824a127af 2024-11-23T15:24:35,197 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/efe2c3ee5afc4ee2b989e9e824a127af, entries=150, sequenceid=327, filesize=12.0 K 2024-11-23T15:24:35,198 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/C/62ceb8703821403f9d822ee2a51f6181 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/62ceb8703821403f9d822ee2a51f6181 2024-11-23T15:24:35,205 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/62ceb8703821403f9d822ee2a51f6181, entries=150, sequenceid=327, filesize=12.0 K 2024-11-23T15:24:35,208 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=40.25 KB/41220 for d6bd711ee7b1117306956b276de6b58d in 883ms, sequenceid=327, compaction requested=true 2024-11-23T15:24:35,208 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2538): Flush status journal for d6bd711ee7b1117306956b276de6b58d: 2024-11-23T15:24:35,208 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d. 
2024-11-23T15:24:35,208 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=25 2024-11-23T15:24:35,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4106): Remote procedure done, pid=25 2024-11-23T15:24:35,211 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=25, resume processing ppid=24 2024-11-23T15:24:35,212 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=25, ppid=24, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.0380 sec 2024-11-23T15:24:35,214 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=24, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=24, table=TestAcidGuarantees in 1.0460 sec 2024-11-23T15:24:35,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-11-23T15:24:35,275 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 24 completed 2024-11-23T15:24:35,277 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-23T15:24:35,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] procedure2.ProcedureExecutor(1098): Stored pid=26, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=26, table=TestAcidGuarantees 2024-11-23T15:24:35,279 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=26, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=26, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-23T15:24:35,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-11-23T15:24:35,279 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=26, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=26, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-23T15:24:35,280 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=27, ppid=26, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-23T15:24:35,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-11-23T15:24:35,431 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:24:35,432 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=27 2024-11-23T15:24:35,432 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d. 
2024-11-23T15:24:35,432 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2837): Flushing d6bd711ee7b1117306956b276de6b58d 3/3 column families, dataSize=40.25 KB heapSize=106.22 KB 2024-11-23T15:24:35,433 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d6bd711ee7b1117306956b276de6b58d, store=A 2024-11-23T15:24:35,433 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:24:35,433 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d6bd711ee7b1117306956b276de6b58d, store=B 2024-11-23T15:24:35,433 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:24:35,433 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d6bd711ee7b1117306956b276de6b58d, store=C 2024-11-23T15:24:35,433 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:24:35,439 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/A/174adfd0ef914bcda4810a7f1e7e1685 is 50, key is test_row_0/A:col10/1732375474502/Put/seqid=0 2024-11-23T15:24:35,451 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073741905_1081 (size=12301) 2024-11-23T15:24:35,451 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=336 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/A/174adfd0ef914bcda4810a7f1e7e1685 2024-11-23T15:24:35,464 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/B/b130dd77da2b4c9981cc33ff82d617dc is 50, key is test_row_0/B:col10/1732375474502/Put/seqid=0 2024-11-23T15:24:35,479 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073741906_1082 (size=12301) 2024-11-23T15:24:35,480 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=336 (bloomFilter=true), 
to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/B/b130dd77da2b4c9981cc33ff82d617dc 2024-11-23T15:24:35,494 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/C/45d439019d4d48a4a3bf848e7fd66ffa is 50, key is test_row_0/C:col10/1732375474502/Put/seqid=0 2024-11-23T15:24:35,503 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073741907_1083 (size=12301) 2024-11-23T15:24:35,507 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=336 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/C/45d439019d4d48a4a3bf848e7fd66ffa 2024-11-23T15:24:35,516 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/A/174adfd0ef914bcda4810a7f1e7e1685 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/174adfd0ef914bcda4810a7f1e7e1685 2024-11-23T15:24:35,522 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/174adfd0ef914bcda4810a7f1e7e1685, entries=150, sequenceid=336, filesize=12.0 K 2024-11-23T15:24:35,524 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/B/b130dd77da2b4c9981cc33ff82d617dc as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/b130dd77da2b4c9981cc33ff82d617dc 2024-11-23T15:24:35,530 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/b130dd77da2b4c9981cc33ff82d617dc, entries=150, sequenceid=336, filesize=12.0 K 2024-11-23T15:24:35,532 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/C/45d439019d4d48a4a3bf848e7fd66ffa as 
hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/45d439019d4d48a4a3bf848e7fd66ffa 2024-11-23T15:24:35,538 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/45d439019d4d48a4a3bf848e7fd66ffa, entries=150, sequenceid=336, filesize=12.0 K 2024-11-23T15:24:35,540 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(3040): Finished flush of dataSize ~40.25 KB/41220, heapSize ~106.17 KB/108720, currentSize=0 B/0 for d6bd711ee7b1117306956b276de6b58d in 108ms, sequenceid=336, compaction requested=true 2024-11-23T15:24:35,540 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2538): Flush status journal for d6bd711ee7b1117306956b276de6b58d: 2024-11-23T15:24:35,540 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d. 2024-11-23T15:24:35,540 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=27 2024-11-23T15:24:35,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4106): Remote procedure done, pid=27 2024-11-23T15:24:35,544 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=27, resume processing ppid=26 2024-11-23T15:24:35,544 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=27, ppid=26, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 262 msec 2024-11-23T15:24:35,546 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=26, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=26, table=TestAcidGuarantees in 268 msec 2024-11-23T15:24:35,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-11-23T15:24:35,581 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 26 completed 2024-11-23T15:24:35,583 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-23T15:24:35,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] procedure2.ProcedureExecutor(1098): Stored pid=28, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=28, table=TestAcidGuarantees 2024-11-23T15:24:35,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-11-23T15:24:35,587 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=28, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=28, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-23T15:24:35,588 INFO [PEWorker-5 {}] 
procedure.FlushTableProcedure(91): pid=28, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=28, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-23T15:24:35,588 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=29, ppid=28, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-23T15:24:35,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(8581): Flush requested on d6bd711ee7b1117306956b276de6b58d 2024-11-23T15:24:35,639 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d6bd711ee7b1117306956b276de6b58d 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-23T15:24:35,641 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d6bd711ee7b1117306956b276de6b58d, store=A 2024-11-23T15:24:35,641 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:24:35,641 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d6bd711ee7b1117306956b276de6b58d, store=B 2024-11-23T15:24:35,641 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:24:35,642 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d6bd711ee7b1117306956b276de6b58d, store=C 2024-11-23T15:24:35,642 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:24:35,648 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/A/fd7d6725596c4afabe6b19e933aeca0b is 50, key is test_row_0/A:col10/1732375475639/Put/seqid=0 2024-11-23T15:24:35,664 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073741908_1084 (size=12301) 2024-11-23T15:24:35,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-11-23T15:24:35,741 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:24:35,741 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=29 2024-11-23T15:24:35,742 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d. 2024-11-23T15:24:35,742 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d. as already flushing 2024-11-23T15:24:35,742 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d. 
2024-11-23T15:24:35,742 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] handler.RSProcedureHandler(58): pid=29 java.io.IOException: Unable to complete flush {ENCODED => d6bd711ee7b1117306956b276de6b58d, NAME => 'TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:24:35,742 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=29 java.io.IOException: Unable to complete flush {ENCODED => d6bd711ee7b1117306956b276de6b58d, NAME => 'TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:24:35,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=29 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d6bd711ee7b1117306956b276de6b58d, NAME => 'TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d6bd711ee7b1117306956b276de6b58d, NAME => 'TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:24:35,776 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:35,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59132 deadline: 1732375535773, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:35,777 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:35,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59158 deadline: 1732375535773, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:35,777 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:35,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59172 deadline: 1732375535773, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:35,777 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:35,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59196 deadline: 1732375535774, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:35,778 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:35,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59180 deadline: 1732375535774, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:35,881 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:35,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59132 deadline: 1732375535878, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:35,882 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:35,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59158 deadline: 1732375535878, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:35,882 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:35,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59172 deadline: 1732375535878, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:35,883 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:35,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59196 deadline: 1732375535879, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:35,883 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:35,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59180 deadline: 1732375535879, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:35,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-11-23T15:24:35,894 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:24:35,895 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=29 2024-11-23T15:24:35,895 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d. 2024-11-23T15:24:35,895 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d. as already flushing 2024-11-23T15:24:35,896 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d. 2024-11-23T15:24:35,896 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] handler.RSProcedureHandler(58): pid=29 java.io.IOException: Unable to complete flush {ENCODED => d6bd711ee7b1117306956b276de6b58d, NAME => 'TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:24:35,896 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=29 java.io.IOException: Unable to complete flush {ENCODED => d6bd711ee7b1117306956b276de6b58d, NAME => 'TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:24:35,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=29 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d6bd711ee7b1117306956b276de6b58d, NAME => 'TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d6bd711ee7b1117306956b276de6b58d, NAME => 'TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:24:36,048 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:24:36,048 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=29 2024-11-23T15:24:36,049 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d. 2024-11-23T15:24:36,049 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d. as already flushing 2024-11-23T15:24:36,049 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d. 2024-11-23T15:24:36,049 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] handler.RSProcedureHandler(58): pid=29 java.io.IOException: Unable to complete flush {ENCODED => d6bd711ee7b1117306956b276de6b58d, NAME => 'TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:24:36,049 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=29 java.io.IOException: Unable to complete flush {ENCODED => d6bd711ee7b1117306956b276de6b58d, NAME => 'TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:24:36,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=29 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d6bd711ee7b1117306956b276de6b58d, NAME => 'TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d6bd711ee7b1117306956b276de6b58d, NAME => 'TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:24:36,065 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=350 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/A/fd7d6725596c4afabe6b19e933aeca0b 2024-11-23T15:24:36,075 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/B/825c008af7594132a39c31e2d4354f93 is 50, key is test_row_0/B:col10/1732375475639/Put/seqid=0 2024-11-23T15:24:36,080 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073741909_1085 (size=12301) 2024-11-23T15:24:36,084 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:36,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59158 deadline: 1732375536083, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:36,084 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:36,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59172 deadline: 1732375536084, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:36,085 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:36,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59132 deadline: 1732375536084, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:36,086 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:36,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59180 deadline: 1732375536084, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:36,088 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:36,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59196 deadline: 1732375536085, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:36,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-11-23T15:24:36,201 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:24:36,202 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=29 2024-11-23T15:24:36,202 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d. 2024-11-23T15:24:36,202 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d. as already flushing 2024-11-23T15:24:36,202 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d. 2024-11-23T15:24:36,202 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] handler.RSProcedureHandler(58): pid=29 java.io.IOException: Unable to complete flush {ENCODED => d6bd711ee7b1117306956b276de6b58d, NAME => 'TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:24:36,202 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=29 java.io.IOException: Unable to complete flush {ENCODED => d6bd711ee7b1117306956b276de6b58d, NAME => 'TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:24:36,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=29 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d6bd711ee7b1117306956b276de6b58d, NAME => 'TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d6bd711ee7b1117306956b276de6b58d, NAME => 'TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:24:36,354 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:24:36,355 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=29 2024-11-23T15:24:36,355 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d. 2024-11-23T15:24:36,355 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d. as already flushing 2024-11-23T15:24:36,355 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d. 2024-11-23T15:24:36,355 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] handler.RSProcedureHandler(58): pid=29 java.io.IOException: Unable to complete flush {ENCODED => d6bd711ee7b1117306956b276de6b58d, NAME => 'TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:24:36,355 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=29 java.io.IOException: Unable to complete flush {ENCODED => d6bd711ee7b1117306956b276de6b58d, NAME => 'TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:24:36,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=29 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d6bd711ee7b1117306956b276de6b58d, NAME => 'TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d6bd711ee7b1117306956b276de6b58d, NAME => 'TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:24:36,387 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:36,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59158 deadline: 1732375536385, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:36,389 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:36,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59132 deadline: 1732375536386, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:36,390 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:36,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59180 deadline: 1732375536387, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:36,391 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:36,391 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:36,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59196 deadline: 1732375536390, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:36,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59172 deadline: 1732375536388, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:36,481 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=350 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/B/825c008af7594132a39c31e2d4354f93 2024-11-23T15:24:36,495 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/C/200945a1a917429b9d3bcf4f6c45add1 is 50, key is test_row_0/C:col10/1732375475639/Put/seqid=0 2024-11-23T15:24:36,505 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073741910_1086 (size=12301) 2024-11-23T15:24:36,506 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=350 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/C/200945a1a917429b9d3bcf4f6c45add1 2024-11-23T15:24:36,508 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:24:36,508 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=29 2024-11-23T15:24:36,509 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(51): 
Starting region operation on TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d. 2024-11-23T15:24:36,509 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d. as already flushing 2024-11-23T15:24:36,509 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d. 2024-11-23T15:24:36,509 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] handler.RSProcedureHandler(58): pid=29 java.io.IOException: Unable to complete flush {ENCODED => d6bd711ee7b1117306956b276de6b58d, NAME => 'TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:24:36,509 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=29 java.io.IOException: Unable to complete flush {ENCODED => d6bd711ee7b1117306956b276de6b58d, NAME => 'TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T15:24:36,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=29 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d6bd711ee7b1117306956b276de6b58d, NAME => 'TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d6bd711ee7b1117306956b276de6b58d, NAME => 'TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T15:24:36,515 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/A/fd7d6725596c4afabe6b19e933aeca0b as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/fd7d6725596c4afabe6b19e933aeca0b 2024-11-23T15:24:36,527 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/fd7d6725596c4afabe6b19e933aeca0b, entries=150, sequenceid=350, filesize=12.0 K 2024-11-23T15:24:36,529 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/B/825c008af7594132a39c31e2d4354f93 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/825c008af7594132a39c31e2d4354f93 2024-11-23T15:24:36,536 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/825c008af7594132a39c31e2d4354f93, entries=150, sequenceid=350, filesize=12.0 K 2024-11-23T15:24:36,537 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/C/200945a1a917429b9d3bcf4f6c45add1 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/200945a1a917429b9d3bcf4f6c45add1 2024-11-23T15:24:36,545 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/200945a1a917429b9d3bcf4f6c45add1, entries=150, sequenceid=350, filesize=12.0 K 2024-11-23T15:24:36,546 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for d6bd711ee7b1117306956b276de6b58d in 907ms, sequenceid=350, compaction requested=true 2024-11-23T15:24:36,546 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d6bd711ee7b1117306956b276de6b58d: 2024-11-23T15:24:36,546 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d6bd711ee7b1117306956b276de6b58d:A, priority=-2147483648, current under compaction store size is 1 2024-11-23T15:24:36,546 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T15:24:36,546 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d6bd711ee7b1117306956b276de6b58d:B, priority=-2147483648, current under compaction store size is 2 2024-11-23T15:24:36,546 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] 
compactions.SortedCompactionPolicy(75): Selecting compaction from 5 store files, 0 compacting, 5 eligible, 16 blocking 2024-11-23T15:24:36,546 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T15:24:36,546 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 5 store files, 0 compacting, 5 eligible, 16 blocking 2024-11-23T15:24:36,546 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d6bd711ee7b1117306956b276de6b58d:C, priority=-2147483648, current under compaction store size is 3 2024-11-23T15:24:36,546 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T15:24:36,551 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 5 files of size 62153 starting at candidate #0 after considering 6 permutations with 6 in ratio 2024-11-23T15:24:36,551 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1540): d6bd711ee7b1117306956b276de6b58d/B is initiating minor compaction (all files) 2024-11-23T15:24:36,551 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d6bd711ee7b1117306956b276de6b58d/B in TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d. 2024-11-23T15:24:36,551 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/22e456cb42484fbea5f9a7082fc4171c, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/28d70fcecbfc48d2b32d856377bab704, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/efe2c3ee5afc4ee2b989e9e824a127af, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/b130dd77da2b4c9981cc33ff82d617dc, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/825c008af7594132a39c31e2d4354f93] into tmpdir=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp, totalSize=60.7 K 2024-11-23T15:24:36,552 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 5 files of size 62153 starting at candidate #0 after considering 6 permutations with 6 in ratio 2024-11-23T15:24:36,552 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HStore(1540): d6bd711ee7b1117306956b276de6b58d/A is initiating minor compaction (all files) 2024-11-23T15:24:36,552 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d6bd711ee7b1117306956b276de6b58d/A in TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d. 
2024-11-23T15:24:36,552 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/e80ab0ae06144da4ba9a402305f7775c, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/379b1d66f4764d418273e5b7087deeef, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/bbfd6d1a4dc7414d9332939e5a854da4, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/174adfd0ef914bcda4810a7f1e7e1685, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/fd7d6725596c4afabe6b19e933aeca0b] into tmpdir=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp, totalSize=60.7 K 2024-11-23T15:24:36,552 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting 22e456cb42484fbea5f9a7082fc4171c, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=287, earliestPutTs=1732375471589 2024-11-23T15:24:36,553 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.Compactor(224): Compacting e80ab0ae06144da4ba9a402305f7775c, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=287, earliestPutTs=1732375471589 2024-11-23T15:24:36,553 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting 28d70fcecbfc48d2b32d856377bab704, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=297, earliestPutTs=1732375472222 2024-11-23T15:24:36,556 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting efe2c3ee5afc4ee2b989e9e824a127af, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=327, earliestPutTs=1732375473372 2024-11-23T15:24:36,556 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 379b1d66f4764d418273e5b7087deeef, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=297, earliestPutTs=1732375472222 2024-11-23T15:24:36,557 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting b130dd77da2b4c9981cc33ff82d617dc, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=336, earliestPutTs=1732375474494 2024-11-23T15:24:36,557 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.Compactor(224): Compacting bbfd6d1a4dc7414d9332939e5a854da4, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=327, earliestPutTs=1732375473372 2024-11-23T15:24:36,558 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 174adfd0ef914bcda4810a7f1e7e1685, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=336, earliestPutTs=1732375474494 2024-11-23T15:24:36,558 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.Compactor(224): Compacting fd7d6725596c4afabe6b19e933aeca0b, keycount=150, bloomtype=ROW, size=12.0 K, 
encoding=NONE, compression=NONE, seqNum=350, earliestPutTs=1732375475639 2024-11-23T15:24:36,558 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting 825c008af7594132a39c31e2d4354f93, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=350, earliestPutTs=1732375475639 2024-11-23T15:24:36,583 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d6bd711ee7b1117306956b276de6b58d#A#compaction#72 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T15:24:36,584 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/A/a77b70daf51d48149a1859bd35c45a06 is 50, key is test_row_0/A:col10/1732375475639/Put/seqid=0 2024-11-23T15:24:36,586 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d6bd711ee7b1117306956b276de6b58d#B#compaction#73 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T15:24:36,586 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/B/746faef8e3864cc1880465bb012a61d3 is 50, key is test_row_0/B:col10/1732375475639/Put/seqid=0 2024-11-23T15:24:36,596 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073741911_1087 (size=13119) 2024-11-23T15:24:36,605 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/A/a77b70daf51d48149a1859bd35c45a06 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/a77b70daf51d48149a1859bd35c45a06 2024-11-23T15:24:36,614 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 5 (all) file(s) in d6bd711ee7b1117306956b276de6b58d/A of d6bd711ee7b1117306956b276de6b58d into a77b70daf51d48149a1859bd35c45a06(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T15:24:36,614 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d6bd711ee7b1117306956b276de6b58d: 2024-11-23T15:24:36,614 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d., storeName=d6bd711ee7b1117306956b276de6b58d/A, priority=11, startTime=1732375476546; duration=0sec 2024-11-23T15:24:36,614 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T15:24:36,614 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d6bd711ee7b1117306956b276de6b58d:A 2024-11-23T15:24:36,614 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 5 store files, 0 compacting, 5 eligible, 16 blocking 2024-11-23T15:24:36,618 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 5 files of size 62153 starting at candidate #0 after considering 6 permutations with 6 in ratio 2024-11-23T15:24:36,618 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HStore(1540): d6bd711ee7b1117306956b276de6b58d/C is initiating minor compaction (all files) 2024-11-23T15:24:36,618 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d6bd711ee7b1117306956b276de6b58d/C in TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d. 2024-11-23T15:24:36,618 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/57009fe9f4dd468ab2a5a77450d9d34d, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/0ad7203d568c47f79c5ac10e5b13f869, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/62ceb8703821403f9d822ee2a51f6181, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/45d439019d4d48a4a3bf848e7fd66ffa, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/200945a1a917429b9d3bcf4f6c45add1] into tmpdir=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp, totalSize=60.7 K 2024-11-23T15:24:36,619 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 57009fe9f4dd468ab2a5a77450d9d34d, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=287, earliestPutTs=1732375471589 2024-11-23T15:24:36,619 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0ad7203d568c47f79c5ac10e5b13f869, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=297, earliestPutTs=1732375472222 
2024-11-23T15:24:36,621 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 62ceb8703821403f9d822ee2a51f6181, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=327, earliestPutTs=1732375473372 2024-11-23T15:24:36,621 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073741912_1088 (size=13119) 2024-11-23T15:24:36,621 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 45d439019d4d48a4a3bf848e7fd66ffa, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=336, earliestPutTs=1732375474494 2024-11-23T15:24:36,622 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 200945a1a917429b9d3bcf4f6c45add1, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=350, earliestPutTs=1732375475639 2024-11-23T15:24:36,634 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/B/746faef8e3864cc1880465bb012a61d3 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/746faef8e3864cc1880465bb012a61d3 2024-11-23T15:24:36,643 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 5 (all) file(s) in d6bd711ee7b1117306956b276de6b58d/B of d6bd711ee7b1117306956b276de6b58d into 746faef8e3864cc1880465bb012a61d3(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T15:24:36,643 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d6bd711ee7b1117306956b276de6b58d: 2024-11-23T15:24:36,643 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d., storeName=d6bd711ee7b1117306956b276de6b58d/B, priority=11, startTime=1732375476546; duration=0sec 2024-11-23T15:24:36,643 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T15:24:36,643 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d6bd711ee7b1117306956b276de6b58d:B 2024-11-23T15:24:36,651 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d6bd711ee7b1117306956b276de6b58d#C#compaction#74 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T15:24:36,652 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/C/00230f55e52e417d817b90b6550b6715 is 50, key is test_row_0/C:col10/1732375475639/Put/seqid=0 2024-11-23T15:24:36,663 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:24:36,665 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=29 2024-11-23T15:24:36,665 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d. 2024-11-23T15:24:36,665 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2837): Flushing d6bd711ee7b1117306956b276de6b58d 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-11-23T15:24:36,666 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d6bd711ee7b1117306956b276de6b58d, store=A 2024-11-23T15:24:36,666 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:24:36,666 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d6bd711ee7b1117306956b276de6b58d, store=B 2024-11-23T15:24:36,666 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:24:36,666 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d6bd711ee7b1117306956b276de6b58d, store=C 2024-11-23T15:24:36,666 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:24:36,672 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/A/8b6ff5ab9d774c34811cc1022b95c156 is 50, key is test_row_0/A:col10/1732375475747/Put/seqid=0 2024-11-23T15:24:36,678 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073741913_1089 (size=13119) 2024-11-23T15:24:36,682 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073741914_1090 (size=12301) 2024-11-23T15:24:36,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 
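
Editor's note: the "Over memstore limit=512.0 K" RegionTooBusyException entries that follow come from HBase blocking writes once a region's memstore grows past (flush size x block multiplier). The property names below are standard HBase configuration keys; the 128 K flush size is an assumption chosen so the product matches the 512 K limit reported in this test run, since the test's actual configuration is not shown in this excerpt.

// Sketch of how the 512 K blocking threshold could arise from configuration.
// Property keys are real HBase settings; the concrete values are assumptions.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreBlockingLimit {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);   // assumed test value
    conf.setLong("hbase.hregion.memstore.block.multiplier", 4);      // HBase default

    long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
    long multiplier = conf.getLong("hbase.hregion.memstore.block.multiplier", 4);

    // 128 K * 4 = 512 K, the limit quoted in the RegionTooBusyException entries below.
    System.out.println("blocking memstore size = " + (flushSize * multiplier) + " bytes");
  }
}
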
2024-11-23T15:24:36,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(8581): Flush requested on d6bd711ee7b1117306956b276de6b58d 2024-11-23T15:24:36,891 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d. as already flushing 2024-11-23T15:24:36,901 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:36,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59172 deadline: 1732375536899, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:36,903 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:36,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59196 deadline: 1732375536899, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:36,904 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:36,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59180 deadline: 1732375536899, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:36,904 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:36,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59132 deadline: 1732375536900, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:36,905 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:36,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59158 deadline: 1732375536901, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:37,003 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:37,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59172 deadline: 1732375537003, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:37,008 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:37,008 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:37,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59196 deadline: 1732375537004, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:37,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59180 deadline: 1732375537005, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:37,009 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:37,009 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:37,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59132 deadline: 1732375537005, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:37,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59158 deadline: 1732375537006, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:37,082 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=374 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/A/8b6ff5ab9d774c34811cc1022b95c156 2024-11-23T15:24:37,086 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/C/00230f55e52e417d817b90b6550b6715 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/00230f55e52e417d817b90b6550b6715 2024-11-23T15:24:37,092 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/B/c5df623b9d1646afb89cad0b7019e15f is 50, key is test_row_0/B:col10/1732375475747/Put/seqid=0 2024-11-23T15:24:37,094 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 5 (all) file(s) in d6bd711ee7b1117306956b276de6b58d/C of d6bd711ee7b1117306956b276de6b58d into 00230f55e52e417d817b90b6550b6715(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T15:24:37,094 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d6bd711ee7b1117306956b276de6b58d: 2024-11-23T15:24:37,094 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d., storeName=d6bd711ee7b1117306956b276de6b58d/C, priority=11, startTime=1732375476546; duration=0sec 2024-11-23T15:24:37,094 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T15:24:37,094 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d6bd711ee7b1117306956b276de6b58d:C 2024-11-23T15:24:37,097 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073741915_1091 (size=12301) 2024-11-23T15:24:37,208 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:37,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59172 deadline: 1732375537205, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:37,210 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:37,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59196 deadline: 1732375537210, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:37,210 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:37,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59132 deadline: 1732375537210, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:37,211 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:37,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59158 deadline: 1732375537211, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:37,211 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:37,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59180 deadline: 1732375537211, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:37,498 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=374 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/B/c5df623b9d1646afb89cad0b7019e15f 2024-11-23T15:24:37,511 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:37,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59172 deadline: 1732375537510, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:37,514 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:37,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59158 deadline: 1732375537514, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:37,514 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:37,514 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:37,514 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/C/de0e3f535bae489884faf406d2b6a2d7 is 50, key is test_row_0/C:col10/1732375475747/Put/seqid=0 2024-11-23T15:24:37,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59196 deadline: 1732375537514, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:37,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59132 deadline: 1732375537514, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:37,515 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:37,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59180 deadline: 1732375537515, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:37,522 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073741916_1092 (size=12301) 2024-11-23T15:24:37,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-11-23T15:24:37,923 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=374 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/C/de0e3f535bae489884faf406d2b6a2d7 2024-11-23T15:24:37,930 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/A/8b6ff5ab9d774c34811cc1022b95c156 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/8b6ff5ab9d774c34811cc1022b95c156 2024-11-23T15:24:37,936 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/8b6ff5ab9d774c34811cc1022b95c156, entries=150, sequenceid=374, filesize=12.0 K 2024-11-23T15:24:37,937 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/B/c5df623b9d1646afb89cad0b7019e15f as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/c5df623b9d1646afb89cad0b7019e15f 2024-11-23T15:24:37,943 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/c5df623b9d1646afb89cad0b7019e15f, entries=150, sequenceid=374, filesize=12.0 K 2024-11-23T15:24:37,944 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/C/de0e3f535bae489884faf406d2b6a2d7 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/de0e3f535bae489884faf406d2b6a2d7 2024-11-23T15:24:37,951 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/de0e3f535bae489884faf406d2b6a2d7, entries=150, sequenceid=374, filesize=12.0 K 2024-11-23T15:24:37,952 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=73.80 KB/75570 for d6bd711ee7b1117306956b276de6b58d in 1287ms, sequenceid=374, compaction requested=false 2024-11-23T15:24:37,952 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2538): Flush status journal for d6bd711ee7b1117306956b276de6b58d: 2024-11-23T15:24:37,952 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d. 
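
Editor's note: from the client side, the RegionTooBusyException storm above means writers are pushed back until the flush that completes here drains the memstore. The standard HBase client retries this exception internally; the explicit loop below is only an illustrative sketch, and the backoff values and attempt count are assumptions. Table, family and qualifier names are taken from the log ("TestAcidGuarantees", family "A", column "col10", row "test_row_0").

// Illustrative client-side retry around a put that may surface RegionTooBusyException.
// Uses only standard HBase client API; retry policy here is an assumption.
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class TooBusyRetrySketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"))
          .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 100;                              // assumed starting backoff
      for (int attempt = 1; attempt <= 5; attempt++) {
        try {
          table.put(put);                                // may surface RegionTooBusyException
          break;                                         // write accepted once memstore drains
        } catch (RegionTooBusyException busy) {
          Thread.sleep(backoffMs);                       // give the flush time to complete
          backoffMs *= 2;                                // simple exponential backoff (assumption)
        }
      }
    }
  }
}
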
2024-11-23T15:24:37,952 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=29 2024-11-23T15:24:37,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4106): Remote procedure done, pid=29 2024-11-23T15:24:37,955 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=29, resume processing ppid=28 2024-11-23T15:24:37,955 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=29, ppid=28, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.3650 sec 2024-11-23T15:24:37,957 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=28, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=28, table=TestAcidGuarantees in 2.3730 sec 2024-11-23T15:24:38,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(8581): Flush requested on d6bd711ee7b1117306956b276de6b58d 2024-11-23T15:24:38,016 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d6bd711ee7b1117306956b276de6b58d 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-11-23T15:24:38,016 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d6bd711ee7b1117306956b276de6b58d, store=A 2024-11-23T15:24:38,016 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:24:38,016 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d6bd711ee7b1117306956b276de6b58d, store=B 2024-11-23T15:24:38,016 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:24:38,016 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d6bd711ee7b1117306956b276de6b58d, store=C 2024-11-23T15:24:38,016 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:24:38,022 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/A/c792d4cf10ba492e8da0c5603456881a is 50, key is test_row_0/A:col10/1732375476898/Put/seqid=0 2024-11-23T15:24:38,027 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073741917_1093 (size=14741) 2024-11-23T15:24:38,038 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:38,039 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:38,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59158 deadline: 1732375538035, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:38,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59132 deadline: 1732375538036, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:38,043 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:38,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59180 deadline: 1732375538039, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:38,043 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:38,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59172 deadline: 1732375538039, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:38,044 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:38,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59196 deadline: 1732375538039, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:38,141 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:38,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59132 deadline: 1732375538140, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:38,141 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:38,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59158 deadline: 1732375538140, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:38,145 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:38,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59180 deadline: 1732375538144, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:38,146 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:38,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59196 deadline: 1732375538145, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:38,146 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:38,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59172 deadline: 1732375538145, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:38,344 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:38,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59132 deadline: 1732375538343, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:38,344 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:38,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59158 deadline: 1732375538343, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:38,347 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:38,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59180 deadline: 1732375538347, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:38,349 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:38,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59196 deadline: 1732375538348, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:38,349 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:38,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59172 deadline: 1732375538349, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:38,428 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=390 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/A/c792d4cf10ba492e8da0c5603456881a 2024-11-23T15:24:38,440 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/B/daa8143dac9d4ac8aed18cd93f559b27 is 50, key is test_row_0/B:col10/1732375476898/Put/seqid=0 2024-11-23T15:24:38,447 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073741918_1094 (size=12301) 2024-11-23T15:24:38,448 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=390 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/B/daa8143dac9d4ac8aed18cd93f559b27 2024-11-23T15:24:38,460 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/C/6cd707d0a1ac4bb4be6b7fbbe0406f45 is 50, key is test_row_0/C:col10/1732375476898/Put/seqid=0 2024-11-23T15:24:38,466 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073741919_1095 (size=12301) 2024-11-23T15:24:38,467 INFO [MemStoreFlusher.0 {}] 
regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=390 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/C/6cd707d0a1ac4bb4be6b7fbbe0406f45
2024-11-23T15:24:38,475 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/A/c792d4cf10ba492e8da0c5603456881a as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/c792d4cf10ba492e8da0c5603456881a
2024-11-23T15:24:38,480 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/c792d4cf10ba492e8da0c5603456881a, entries=200, sequenceid=390, filesize=14.4 K
2024-11-23T15:24:38,482 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/B/daa8143dac9d4ac8aed18cd93f559b27 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/daa8143dac9d4ac8aed18cd93f559b27
2024-11-23T15:24:38,487 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/daa8143dac9d4ac8aed18cd93f559b27, entries=150, sequenceid=390, filesize=12.0 K
2024-11-23T15:24:38,489 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/C/6cd707d0a1ac4bb4be6b7fbbe0406f45 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/6cd707d0a1ac4bb4be6b7fbbe0406f45
2024-11-23T15:24:38,495 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/6cd707d0a1ac4bb4be6b7fbbe0406f45, entries=150, sequenceid=390, filesize=12.0 K
2024-11-23T15:24:38,496 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=127.47 KB/130530 for d6bd711ee7b1117306956b276de6b58d in 480ms, sequenceid=390, compaction requested=true
2024-11-23T15:24:38,496 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d6bd711ee7b1117306956b276de6b58d:
2024-11-23T15:24:38,496 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d6bd711ee7b1117306956b276de6b58d:A, priority=-2147483648, current under compaction store size is 1
2024-11-23T15:24:38,496 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
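[Editor's note] The flood of RegionTooBusyException entries above comes from HRegion.checkResources rejecting puts while this region's memstore is over its blocking limit (reported as 512.0 K in this run); the rejections pause once the flush that finishes at 15:24:38,496 drains the memstore and resume as the writers fill it again. Below is a minimal client-side sketch of backing off on that exception. It is illustrative only: the class name, retry count, and backoff values are invented, the table, row, family, and column names are taken from this log, and whether the exception reaches application code directly depends on the client's own retry settings.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BackoffPutSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));

      int maxAttempts = 5;   // illustrative values, not taken from the test
      long backoffMs = 100;
      for (int attempt = 1; ; attempt++) {
        try {
          table.put(put);    // may fail with RegionTooBusyException while the memstore is blocked
          break;
        } catch (RegionTooBusyException e) {
          if (attempt >= maxAttempts) {
            throw e;         // give up after a bounded number of attempts
          }
          Thread.sleep(backoffMs);  // wait for a flush to drain the memstore
          backoffMs *= 2;
        }
      }
    }
  }
}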
2024-11-23T15:24:38,496 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-23T15:24:38,497 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-11-23T15:24:38,497 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d6bd711ee7b1117306956b276de6b58d:B, priority=-2147483648, current under compaction store size is 2
2024-11-23T15:24:38,497 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-23T15:24:38,497 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d6bd711ee7b1117306956b276de6b58d:C, priority=-2147483648, current under compaction store size is 3
2024-11-23T15:24:38,497 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0
2024-11-23T15:24:38,498 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40161 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-11-23T15:24:38,498 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37721 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-11-23T15:24:38,498 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HStore(1540): d6bd711ee7b1117306956b276de6b58d/A is initiating minor compaction (all files)
2024-11-23T15:24:38,498 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1540): d6bd711ee7b1117306956b276de6b58d/B is initiating minor compaction (all files)
2024-11-23T15:24:38,498 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d6bd711ee7b1117306956b276de6b58d/A in TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.
2024-11-23T15:24:38,498 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d6bd711ee7b1117306956b276de6b58d/B in TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.
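[Editor's note] The behaviour logged here is governed by a few region-server settings: the memstore flush size, the block multiplier from which the blocking limit behind the "Over memstore limit" rejections is derived, and the per-store file count at which a minor compaction becomes eligible (three files are selected in this run). The sketch below only illustrates where those knobs live; the values shown are placeholders, since the actual values configured for this test are not visible in the log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class RegionTuningSketch {
  // Placeholder values for illustration; not the settings used by this test run.
  static Configuration exampleConf() {
    Configuration conf = HBaseConfiguration.create();
    // Memstore size at which a flush of the region is requested.
    conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
    // Puts are rejected with RegionTooBusyException once the memstore grows past
    // the flush size multiplied by this factor (the "Over memstore limit" seen in this log).
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
    // A store with at least this many HFiles becomes eligible for minor compaction.
    conf.setInt("hbase.hstore.compactionThreshold", 3);
    return conf;
  }
}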
2024-11-23T15:24:38,498 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/a77b70daf51d48149a1859bd35c45a06, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/8b6ff5ab9d774c34811cc1022b95c156, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/c792d4cf10ba492e8da0c5603456881a] into tmpdir=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp, totalSize=39.2 K
2024-11-23T15:24:38,498 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/746faef8e3864cc1880465bb012a61d3, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/c5df623b9d1646afb89cad0b7019e15f, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/daa8143dac9d4ac8aed18cd93f559b27] into tmpdir=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp, totalSize=36.8 K
2024-11-23T15:24:38,499 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting 746faef8e3864cc1880465bb012a61d3, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=350, earliestPutTs=1732375475639
2024-11-23T15:24:38,499 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.Compactor(224): Compacting a77b70daf51d48149a1859bd35c45a06, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=350, earliestPutTs=1732375475639
2024-11-23T15:24:38,499 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting c5df623b9d1646afb89cad0b7019e15f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=374, earliestPutTs=1732375475743
2024-11-23T15:24:38,499 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8b6ff5ab9d774c34811cc1022b95c156, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=374, earliestPutTs=1732375475743
2024-11-23T15:24:38,500 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting daa8143dac9d4ac8aed18cd93f559b27, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=390, earliestPutTs=1732375476894
2024-11-23T15:24:38,500 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.Compactor(224): Compacting c792d4cf10ba492e8da0c5603456881a, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=390, earliestPutTs=1732375476894
2024-11-23T15:24:38,511 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d6bd711ee7b1117306956b276de6b58d#B#compaction#81 average throughput is 6.55 MB/second, slept 0 time(s) and
total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-11-23T15:24:38,512 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/B/db56faea3b8244208739d33d644422b4 is 50, key is test_row_0/B:col10/1732375476898/Put/seqid=0
2024-11-23T15:24:38,514 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d6bd711ee7b1117306956b276de6b58d#A#compaction#82 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-11-23T15:24:38,515 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/A/7e80475c2e404fa2b391a21610935b90 is 50, key is test_row_0/A:col10/1732375476898/Put/seqid=0
2024-11-23T15:24:38,523 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073741920_1096 (size=13221)
2024-11-23T15:24:38,533 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073741921_1097 (size=13221)
2024-11-23T15:24:38,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(8581): Flush requested on d6bd711ee7b1117306956b276de6b58d
2024-11-23T15:24:38,648 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d6bd711ee7b1117306956b276de6b58d 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB
2024-11-23T15:24:38,649 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d6bd711ee7b1117306956b276de6b58d, store=A
2024-11-23T15:24:38,649 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-23T15:24:38,649 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d6bd711ee7b1117306956b276de6b58d, store=B
2024-11-23T15:24:38,649 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-23T15:24:38,649 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d6bd711ee7b1117306956b276de6b58d, store=C
2024-11-23T15:24:38,649 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-23T15:24:38,654 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/A/c4ff81ee4fff49348ba117b9dac81c79 is 50, key is test_row_0/A:col10/1732375478647/Put/seqid=0
2024-11-23T15:24:38,659 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073741922_1098 (size=12301)
2024-11-23T15:24:38,660 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:38,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59132 deadline: 1732375538655, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:38,660 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:38,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 191 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59158 deadline: 1732375538656, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:38,661 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:38,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59172 deadline: 1732375538656, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:38,662 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:38,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59180 deadline: 1732375538657, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:38,662 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:38,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59196 deadline: 1732375538660, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:38,762 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:38,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59132 deadline: 1732375538761, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:38,763 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:38,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 193 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59158 deadline: 1732375538761, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:38,764 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:38,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59172 deadline: 1732375538762, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:38,765 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:38,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59180 deadline: 1732375538763, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:38,765 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:38,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59196 deadline: 1732375538763, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:38,931 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/B/db56faea3b8244208739d33d644422b4 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/db56faea3b8244208739d33d644422b4 2024-11-23T15:24:38,939 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/A/7e80475c2e404fa2b391a21610935b90 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/7e80475c2e404fa2b391a21610935b90 2024-11-23T15:24:38,940 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d6bd711ee7b1117306956b276de6b58d/B of d6bd711ee7b1117306956b276de6b58d into db56faea3b8244208739d33d644422b4(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
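[Editor's note] At this point the B-store compaction has committed its output file (db56faea3b8244208739d33d644422b4, 12.9 K) and store C is about to be selected. For reference, below is a small sketch of driving the same flush-then-compact cycle through the 2.x Admin API and waiting for it to settle; the table and family names come from this log, while the class name and poll interval are made up for the example.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.CompactionState;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.util.Bytes;

public class CompactAndWaitSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName table = TableName.valueOf("TestAcidGuarantees");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      admin.flush(table);                       // write the memstores out as HFiles
      admin.compact(table, Bytes.toBytes("B")); // ask for a compaction of family B
      // Poll until the servers report no compaction in progress for the table.
      while (admin.getCompactionState(table) != CompactionState.NONE) {
        Thread.sleep(200);
      }
    }
  }
}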
2024-11-23T15:24:38,940 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d6bd711ee7b1117306956b276de6b58d:
2024-11-23T15:24:38,941 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d., storeName=d6bd711ee7b1117306956b276de6b58d/B, priority=13, startTime=1732375478496; duration=0sec
2024-11-23T15:24:38,941 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0
2024-11-23T15:24:38,941 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d6bd711ee7b1117306956b276de6b58d:B
2024-11-23T15:24:38,941 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-11-23T15:24:38,943 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37721 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-11-23T15:24:38,943 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1540): d6bd711ee7b1117306956b276de6b58d/C is initiating minor compaction (all files)
2024-11-23T15:24:38,943 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d6bd711ee7b1117306956b276de6b58d/C in TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.
2024-11-23T15:24:38,943 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/00230f55e52e417d817b90b6550b6715, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/de0e3f535bae489884faf406d2b6a2d7, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/6cd707d0a1ac4bb4be6b7fbbe0406f45] into tmpdir=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp, totalSize=36.8 K
2024-11-23T15:24:38,944 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting 00230f55e52e417d817b90b6550b6715, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=350, earliestPutTs=1732375475639
2024-11-23T15:24:38,944 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting de0e3f535bae489884faf406d2b6a2d7, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=374, earliestPutTs=1732375475743
2024-11-23T15:24:38,945 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting 6cd707d0a1ac4bb4be6b7fbbe0406f45, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=390, earliestPutTs=1732375476894
2024-11-23T15:24:38,947 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3
(all) file(s) in d6bd711ee7b1117306956b276de6b58d/A of d6bd711ee7b1117306956b276de6b58d into 7e80475c2e404fa2b391a21610935b90(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T15:24:38,947 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d6bd711ee7b1117306956b276de6b58d: 2024-11-23T15:24:38,947 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d., storeName=d6bd711ee7b1117306956b276de6b58d/A, priority=13, startTime=1732375478496; duration=0sec 2024-11-23T15:24:38,947 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T15:24:38,947 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d6bd711ee7b1117306956b276de6b58d:A 2024-11-23T15:24:38,954 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d6bd711ee7b1117306956b276de6b58d#C#compaction#84 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T15:24:38,954 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/C/e9fb149977634b15ad2f1fa5f21d2cd4 is 50, key is test_row_0/C:col10/1732375476898/Put/seqid=0 2024-11-23T15:24:38,964 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073741923_1099 (size=13221) 2024-11-23T15:24:38,965 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:38,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 190 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59132 deadline: 1732375538963, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:38,967 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:38,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 195 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59158 deadline: 1732375538965, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:38,968 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:38,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59172 deadline: 1732375538966, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:38,968 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:38,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59180 deadline: 1732375538967, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:38,969 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:38,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59196 deadline: 1732375538967, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:38,974 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/C/e9fb149977634b15ad2f1fa5f21d2cd4 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/e9fb149977634b15ad2f1fa5f21d2cd4 2024-11-23T15:24:38,982 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d6bd711ee7b1117306956b276de6b58d/C of d6bd711ee7b1117306956b276de6b58d into e9fb149977634b15ad2f1fa5f21d2cd4(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T15:24:38,982 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d6bd711ee7b1117306956b276de6b58d: 2024-11-23T15:24:38,982 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d., storeName=d6bd711ee7b1117306956b276de6b58d/C, priority=13, startTime=1732375478497; duration=0sec 2024-11-23T15:24:38,982 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T15:24:38,982 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d6bd711ee7b1117306956b276de6b58d:C 2024-11-23T15:24:39,060 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=414 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/A/c4ff81ee4fff49348ba117b9dac81c79 2024-11-23T15:24:39,071 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/B/6d76a9c690b7492e9601f7a430c6f155 is 50, key is test_row_0/B:col10/1732375478647/Put/seqid=0 2024-11-23T15:24:39,076 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073741924_1100 (size=12301) 2024-11-23T15:24:39,077 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=414 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/B/6d76a9c690b7492e9601f7a430c6f155 2024-11-23T15:24:39,088 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/C/90e070b5a72c427087b7bd3b5c92c289 is 50, key is test_row_0/C:col10/1732375478647/Put/seqid=0 2024-11-23T15:24:39,094 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073741925_1101 (size=12301) 2024-11-23T15:24:39,268 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:39,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 192 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59132 deadline: 1732375539267, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:39,270 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:39,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59172 deadline: 1732375539269, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:39,271 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:39,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 197 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59158 deadline: 1732375539269, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:39,271 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:39,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59180 deadline: 1732375539270, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:39,273 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:39,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 190 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59196 deadline: 1732375539272, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:39,496 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=414 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/C/90e070b5a72c427087b7bd3b5c92c289 2024-11-23T15:24:39,502 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/A/c4ff81ee4fff49348ba117b9dac81c79 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/c4ff81ee4fff49348ba117b9dac81c79 2024-11-23T15:24:39,507 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/c4ff81ee4fff49348ba117b9dac81c79, entries=150, sequenceid=414, filesize=12.0 K 2024-11-23T15:24:39,508 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/B/6d76a9c690b7492e9601f7a430c6f155 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/6d76a9c690b7492e9601f7a430c6f155 2024-11-23T15:24:39,514 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/6d76a9c690b7492e9601f7a430c6f155, entries=150, sequenceid=414, filesize=12.0 K 2024-11-23T15:24:39,515 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/C/90e070b5a72c427087b7bd3b5c92c289 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/90e070b5a72c427087b7bd3b5c92c289 2024-11-23T15:24:39,520 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/90e070b5a72c427087b7bd3b5c92c289, entries=150, sequenceid=414, filesize=12.0 K 2024-11-23T15:24:39,521 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for d6bd711ee7b1117306956b276de6b58d in 873ms, sequenceid=414, compaction requested=false 2024-11-23T15:24:39,522 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d6bd711ee7b1117306956b276de6b58d: 2024-11-23T15:24:39,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-11-23T15:24:39,692 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 28 completed 2024-11-23T15:24:39,693 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-23T15:24:39,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] procedure2.ProcedureExecutor(1098): Stored pid=30, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=30, table=TestAcidGuarantees 2024-11-23T15:24:39,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-11-23T15:24:39,697 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=30, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=30, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-23T15:24:39,698 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=30, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=30, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-23T15:24:39,698 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=31, ppid=30, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-23T15:24:39,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(8581): Flush requested on d6bd711ee7b1117306956b276de6b58d 2024-11-23T15:24:39,775 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d6bd711ee7b1117306956b276de6b58d 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-11-23T15:24:39,776 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactingMemStore(205): FLUSHING TO DISK d6bd711ee7b1117306956b276de6b58d, store=A 2024-11-23T15:24:39,776 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:24:39,776 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d6bd711ee7b1117306956b276de6b58d, store=B 2024-11-23T15:24:39,776 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:24:39,777 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d6bd711ee7b1117306956b276de6b58d, store=C 2024-11-23T15:24:39,777 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:24:39,782 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/A/489133c465e748dc9f46b9b02adcbdc6 is 50, key is test_row_0/A:col10/1732375479774/Put/seqid=0 2024-11-23T15:24:39,787 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073741926_1102 (size=12301) 2024-11-23T15:24:39,794 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:39,795 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:39,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 198 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59132 deadline: 1732375539790, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:39,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 204 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59158 deadline: 1732375539792, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:39,795 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:39,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 195 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59172 deadline: 1732375539792, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:39,795 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:39,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 191 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59180 deadline: 1732375539793, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:39,796 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:39,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 196 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59196 deadline: 1732375539795, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:39,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-11-23T15:24:39,850 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:24:39,851 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=31 2024-11-23T15:24:39,851 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d. 2024-11-23T15:24:39,851 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d. as already flushing 2024-11-23T15:24:39,851 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d. 2024-11-23T15:24:39,851 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] handler.RSProcedureHandler(58): pid=31 java.io.IOException: Unable to complete flush {ENCODED => d6bd711ee7b1117306956b276de6b58d, NAME => 'TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:24:39,852 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=31 java.io.IOException: Unable to complete flush {ENCODED => d6bd711ee7b1117306956b276de6b58d, NAME => 'TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:24:39,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=31 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d6bd711ee7b1117306956b276de6b58d, NAME => 'TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d6bd711ee7b1117306956b276de6b58d, NAME => 'TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:24:39,896 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:39,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 200 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59132 deadline: 1732375539896, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:39,899 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:39,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 198 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59196 deadline: 1732375539897, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:39,899 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:39,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 206 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59158 deadline: 1732375539897, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:39,899 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:39,899 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:39,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 193 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59180 deadline: 1732375539898, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:39,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 197 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59172 deadline: 1732375539898, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:39,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-11-23T15:24:40,004 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:24:40,004 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=31 2024-11-23T15:24:40,004 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d. 2024-11-23T15:24:40,005 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d. as already flushing 2024-11-23T15:24:40,005 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d. 2024-11-23T15:24:40,005 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] handler.RSProcedureHandler(58): pid=31 java.io.IOException: Unable to complete flush {ENCODED => d6bd711ee7b1117306956b276de6b58d, NAME => 'TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-23T15:24:40,005 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=31
java.io.IOException: Unable to complete flush {ENCODED => d6bd711ee7b1117306956b276de6b58d, NAME => 'TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.', STARTKEY => '', ENDKEY => ''}
at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-23T15:24:40,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=31
org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d6bd711ee7b1117306956b276de6b58d, NAME => 'TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.', STARTKEY => '', ENDKEY => ''}
at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?]
at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?]
at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => d6bd711ee7b1117306956b276de6b58d, NAME => 'TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.', STARTKEY => '', ENDKEY => ''}
at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-23T15:24:40,099 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-23T15:24:40,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 202 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59132 deadline: 1732375540098, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985
2024-11-23T15:24:40,100 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-23T15:24:40,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 208 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59158 deadline: 1732375540100, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985
2024-11-23T15:24:40,101 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-23T15:24:40,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 199 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59172 deadline: 1732375540101, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985
2024-11-23T15:24:40,101 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-23T15:24:40,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 200 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59196 deadline: 1732375540101, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985
2024-11-23T15:24:40,104 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-23T15:24:40,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 195 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59180 deadline: 1732375540102, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985
2024-11-23T15:24:40,157 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985
2024-11-23T15:24:40,157 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=31
2024-11-23T15:24:40,158 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.
2024-11-23T15:24:40,158 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d. as already flushing
2024-11-23T15:24:40,158 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.
2024-11-23T15:24:40,158 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] handler.RSProcedureHandler(58): pid=31
java.io.IOException: Unable to complete flush {ENCODED => d6bd711ee7b1117306956b276de6b58d, NAME => 'TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.', STARTKEY => '', ENDKEY => ''}
at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-23T15:24:40,158 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=31
java.io.IOException: Unable to complete flush {ENCODED => d6bd711ee7b1117306956b276de6b58d, NAME => 'TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.', STARTKEY => '', ENDKEY => ''}
at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-23T15:24:40,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=31
org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d6bd711ee7b1117306956b276de6b58d, NAME => 'TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.', STARTKEY => '', ENDKEY => ''}
at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?]
at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?]
at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => d6bd711ee7b1117306956b276de6b58d, NAME => 'TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.', STARTKEY => '', ENDKEY => ''}
at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-23T15:24:40,189 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=432 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/A/489133c465e748dc9f46b9b02adcbdc6
2024-11-23T15:24:40,201 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/B/d688b5d3816547c5a1a5974523a4bb83 is 50, key is test_row_0/B:col10/1732375479774/Put/seqid=0
2024-11-23T15:24:40,213 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073741927_1103 (size=12301)
2024-11-23T15:24:40,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30
2024-11-23T15:24:40,310 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985
2024-11-23T15:24:40,311 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=31
2024-11-23T15:24:40,311 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.
2024-11-23T15:24:40,311 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d. as already flushing
2024-11-23T15:24:40,311 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.
2024-11-23T15:24:40,311 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] handler.RSProcedureHandler(58): pid=31
java.io.IOException: Unable to complete flush {ENCODED => d6bd711ee7b1117306956b276de6b58d, NAME => 'TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.', STARTKEY => '', ENDKEY => ''}
at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-23T15:24:40,312 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=31
java.io.IOException: Unable to complete flush {ENCODED => d6bd711ee7b1117306956b276de6b58d, NAME => 'TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.', STARTKEY => '', ENDKEY => ''}
at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-23T15:24:40,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=31
org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d6bd711ee7b1117306956b276de6b58d, NAME => 'TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.', STARTKEY => '', ENDKEY => ''}
at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?]
at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?]
at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => d6bd711ee7b1117306956b276de6b58d, NAME => 'TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.', STARTKEY => '', ENDKEY => ''}
at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-23T15:24:40,403 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-23T15:24:40,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 204 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59132 deadline: 1732375540401, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985
2024-11-23T15:24:40,404 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-23T15:24:40,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 201 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59172 deadline: 1732375540403, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985
2024-11-23T15:24:40,405 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-23T15:24:40,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 202 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59196 deadline: 1732375540403, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985
2024-11-23T15:24:40,406 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-23T15:24:40,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 210 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59158 deadline: 1732375540404, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985
2024-11-23T15:24:40,408 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-23T15:24:40,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 197 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59180 deadline: 1732375540408, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985
2024-11-23T15:24:40,463 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985
2024-11-23T15:24:40,463 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=31
2024-11-23T15:24:40,464 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.
2024-11-23T15:24:40,464 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d. as already flushing
2024-11-23T15:24:40,464 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.
2024-11-23T15:24:40,464 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] handler.RSProcedureHandler(58): pid=31
java.io.IOException: Unable to complete flush {ENCODED => d6bd711ee7b1117306956b276de6b58d, NAME => 'TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.', STARTKEY => '', ENDKEY => ''}
at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-23T15:24:40,464 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=31
java.io.IOException: Unable to complete flush {ENCODED => d6bd711ee7b1117306956b276de6b58d, NAME => 'TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.', STARTKEY => '', ENDKEY => ''}
at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-23T15:24:40,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=31
org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d6bd711ee7b1117306956b276de6b58d, NAME => 'TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.', STARTKEY => '', ENDKEY => ''}
at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?]
at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?]
at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => d6bd711ee7b1117306956b276de6b58d, NAME => 'TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.', STARTKEY => '', ENDKEY => ''}
at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-23T15:24:40,614 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=432 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/B/d688b5d3816547c5a1a5974523a4bb83
2024-11-23T15:24:40,617 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985
2024-11-23T15:24:40,618 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=31
2024-11-23T15:24:40,618 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.
2024-11-23T15:24:40,618 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d. as already flushing
2024-11-23T15:24:40,618 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.
2024-11-23T15:24:40,618 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] handler.RSProcedureHandler(58): pid=31
java.io.IOException: Unable to complete flush {ENCODED => d6bd711ee7b1117306956b276de6b58d, NAME => 'TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.', STARTKEY => '', ENDKEY => ''}
at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-23T15:24:40,618 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=31
java.io.IOException: Unable to complete flush {ENCODED => d6bd711ee7b1117306956b276de6b58d, NAME => 'TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.', STARTKEY => '', ENDKEY => ''}
at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-23T15:24:40,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=31
org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d6bd711ee7b1117306956b276de6b58d, NAME => 'TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.', STARTKEY => '', ENDKEY => ''}
at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?]
at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?]
at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => d6bd711ee7b1117306956b276de6b58d, NAME => 'TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.', STARTKEY => '', ENDKEY => ''}
at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-23T15:24:40,624 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/C/77dce7680f104a7ea031329ef42bb2ce is 50, key is test_row_0/C:col10/1732375479774/Put/seqid=0
2024-11-23T15:24:40,629 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073741928_1104 (size=12301)
2024-11-23T15:24:40,771 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985
2024-11-23T15:24:40,771 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=31
2024-11-23T15:24:40,772 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.
2024-11-23T15:24:40,772 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d. as already flushing
2024-11-23T15:24:40,772 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.
2024-11-23T15:24:40,772 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] handler.RSProcedureHandler(58): pid=31
java.io.IOException: Unable to complete flush {ENCODED => d6bd711ee7b1117306956b276de6b58d, NAME => 'TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.', STARTKEY => '', ENDKEY => ''}
at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-23T15:24:40,772 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=31
java.io.IOException: Unable to complete flush {ENCODED => d6bd711ee7b1117306956b276de6b58d, NAME => 'TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.', STARTKEY => '', ENDKEY => ''}
at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-23T15:24:40,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=31
org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d6bd711ee7b1117306956b276de6b58d, NAME => 'TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.', STARTKEY => '', ENDKEY => ''}
at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?]
at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?]
at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => d6bd711ee7b1117306956b276de6b58d, NAME => 'TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.', STARTKEY => '', ENDKEY => ''}
at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-23T15:24:40,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30
2024-11-23T15:24:40,906 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-23T15:24:40,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 206 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59132 deadline: 1732375540905, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985
2024-11-23T15:24:40,907 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:40,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 203 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59172 deadline: 1732375540906, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:40,910 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:40,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 212 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59158 deadline: 1732375540909, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:40,910 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:40,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 204 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59196 deadline: 1732375540910, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:40,916 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:40,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 199 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59180 deadline: 1732375540913, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:40,924 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:24:40,925 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=31 2024-11-23T15:24:40,925 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d. 2024-11-23T15:24:40,925 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d. as already flushing 2024-11-23T15:24:40,925 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d. 2024-11-23T15:24:40,925 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] handler.RSProcedureHandler(58): pid=31 java.io.IOException: Unable to complete flush {ENCODED => d6bd711ee7b1117306956b276de6b58d, NAME => 'TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
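The bursts of RegionTooBusyException warnings in this stretch of the log (the rejected Mutate calls with callId 206, 203, 212, 204 and 199 above) come from HRegion.checkResources refusing writes once the region's memstore passes its blocking size, reported here as 512.0 K. That blocking size is hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier, and the 512 K figure (versus a default of roughly 512 MB) shows the test runs with a deliberately tiny flush size so the limit is hit quickly. Below is a sketch of a configuration that would yield the same ceiling; the specific numbers are an assumption chosen to reproduce the figure, not values read from the test source.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class SmallMemstoreConfig {
    static Configuration create() {
        Configuration conf = HBaseConfiguration.create();
        // Hypothetical values: 128 KB flush size x blocking multiplier 4 = 512 KB, matching the
        // "Over memstore limit=512.0 K" messages in the log. The defaults are 128 MB and 4.
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
        return conf;
    }
}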
2024-11-23T15:24:40,925 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=31 java.io.IOException: Unable to complete flush {ENCODED => d6bd711ee7b1117306956b276de6b58d, NAME => 'TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:24:40,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=31 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d6bd711ee7b1117306956b276de6b58d, NAME => 'TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d6bd711ee7b1117306956b276de6b58d, NAME => 'TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:24:41,030 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=432 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/C/77dce7680f104a7ea031329ef42bb2ce 2024-11-23T15:24:41,036 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/A/489133c465e748dc9f46b9b02adcbdc6 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/489133c465e748dc9f46b9b02adcbdc6 2024-11-23T15:24:41,042 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/489133c465e748dc9f46b9b02adcbdc6, entries=150, sequenceid=432, filesize=12.0 K 2024-11-23T15:24:41,044 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/B/d688b5d3816547c5a1a5974523a4bb83 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/d688b5d3816547c5a1a5974523a4bb83 2024-11-23T15:24:41,050 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/d688b5d3816547c5a1a5974523a4bb83, entries=150, sequenceid=432, filesize=12.0 K 2024-11-23T15:24:41,051 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/C/77dce7680f104a7ea031329ef42bb2ce as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/77dce7680f104a7ea031329ef42bb2ce 2024-11-23T15:24:41,056 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/77dce7680f104a7ea031329ef42bb2ce, entries=150, sequenceid=432, filesize=12.0 K 2024-11-23T15:24:41,057 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for d6bd711ee7b1117306956b276de6b58d in 1282ms, sequenceid=432, compaction requested=true 2024-11-23T15:24:41,058 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d6bd711ee7b1117306956b276de6b58d: 2024-11-23T15:24:41,058 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 
d6bd711ee7b1117306956b276de6b58d:A, priority=-2147483648, current under compaction store size is 1 2024-11-23T15:24:41,058 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T15:24:41,058 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d6bd711ee7b1117306956b276de6b58d:B, priority=-2147483648, current under compaction store size is 2 2024-11-23T15:24:41,058 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T15:24:41,058 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T15:24:41,058 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T15:24:41,058 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d6bd711ee7b1117306956b276de6b58d:C, priority=-2147483648, current under compaction store size is 3 2024-11-23T15:24:41,058 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T15:24:41,059 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37823 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T15:24:41,059 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37823 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T15:24:41,059 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HStore(1540): d6bd711ee7b1117306956b276de6b58d/A is initiating minor compaction (all files) 2024-11-23T15:24:41,059 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1540): d6bd711ee7b1117306956b276de6b58d/B is initiating minor compaction (all files) 2024-11-23T15:24:41,059 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d6bd711ee7b1117306956b276de6b58d/A in TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d. 2024-11-23T15:24:41,059 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d6bd711ee7b1117306956b276de6b58d/B in TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d. 
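The "Exploring compaction algorithm has selected 3 files of size 37823 ... with 1 in ratio" lines above can be read directly from the file sizes: each store holds files of about 12.9 K, 12.0 K and 12.0 K, and every file is smaller than the sum of the other two, so the whole set is "in ratio" and selected for a minor compaction. The check below is a paraphrase of that rule as a hypothetical helper (not the HBase source); the ratio comes from hbase.hstore.compaction.ratio, default 1.2.

import java.util.List;

public class FilesInRatioSketch {
    // A candidate set of store-file sizes is "in ratio" when every file is no larger than the
    // sum of the remaining files multiplied by the compaction ratio.
    static boolean filesInRatio(List<Long> sizes, double ratio) {
        long total = 0;
        for (long s : sizes) {
            total += s;
        }
        for (long s : sizes) {
            if (s > (total - s) * ratio) {
                return false;
            }
        }
        return true;
    }
}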
2024-11-23T15:24:41,060 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/db56faea3b8244208739d33d644422b4, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/6d76a9c690b7492e9601f7a430c6f155, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/d688b5d3816547c5a1a5974523a4bb83] into tmpdir=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp, totalSize=36.9 K 2024-11-23T15:24:41,060 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/7e80475c2e404fa2b391a21610935b90, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/c4ff81ee4fff49348ba117b9dac81c79, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/489133c465e748dc9f46b9b02adcbdc6] into tmpdir=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp, totalSize=36.9 K 2024-11-23T15:24:41,060 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting db56faea3b8244208739d33d644422b4, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=390, earliestPutTs=1732375476894 2024-11-23T15:24:41,060 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7e80475c2e404fa2b391a21610935b90, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=390, earliestPutTs=1732375476894 2024-11-23T15:24:41,061 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting 6d76a9c690b7492e9601f7a430c6f155, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=414, earliestPutTs=1732375478037 2024-11-23T15:24:41,061 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.Compactor(224): Compacting c4ff81ee4fff49348ba117b9dac81c79, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=414, earliestPutTs=1732375478037 2024-11-23T15:24:41,061 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting d688b5d3816547c5a1a5974523a4bb83, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=432, earliestPutTs=1732375479774 2024-11-23T15:24:41,061 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 489133c465e748dc9f46b9b02adcbdc6, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=432, earliestPutTs=1732375479774 2024-11-23T15:24:41,070 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d6bd711ee7b1117306956b276de6b58d#B#compaction#90 average throughput is 6.55 MB/second, slept 0 time(s) and 
total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T15:24:41,071 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/B/c4884e2e6fa442aeb9069551a5f4722b is 50, key is test_row_0/B:col10/1732375479774/Put/seqid=0 2024-11-23T15:24:41,073 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d6bd711ee7b1117306956b276de6b58d#A#compaction#91 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T15:24:41,074 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/A/0c1c674cedae4bea96b286d0ccc0155d is 50, key is test_row_0/A:col10/1732375479774/Put/seqid=0 2024-11-23T15:24:41,077 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:24:41,078 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=31 2024-11-23T15:24:41,078 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d. 
2024-11-23T15:24:41,078 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(2837): Flushing d6bd711ee7b1117306956b276de6b58d 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-11-23T15:24:41,078 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d6bd711ee7b1117306956b276de6b58d, store=A 2024-11-23T15:24:41,078 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:24:41,079 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d6bd711ee7b1117306956b276de6b58d, store=B 2024-11-23T15:24:41,079 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:24:41,079 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d6bd711ee7b1117306956b276de6b58d, store=C 2024-11-23T15:24:41,079 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:24:41,086 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/A/c9e9eaad4992426991489f948b01f849 is 50, key is test_row_0/A:col10/1732375479788/Put/seqid=0 2024-11-23T15:24:41,088 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073741930_1106 (size=13323) 2024-11-23T15:24:41,088 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073741929_1105 (size=13323) 2024-11-23T15:24:41,097 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/A/0c1c674cedae4bea96b286d0ccc0155d as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/0c1c674cedae4bea96b286d0ccc0155d 2024-11-23T15:24:41,098 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073741931_1107 (size=12301) 2024-11-23T15:24:41,103 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d6bd711ee7b1117306956b276de6b58d/A of d6bd711ee7b1117306956b276de6b58d into 0c1c674cedae4bea96b286d0ccc0155d(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T15:24:41,103 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d6bd711ee7b1117306956b276de6b58d: 2024-11-23T15:24:41,103 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d., storeName=d6bd711ee7b1117306956b276de6b58d/A, priority=13, startTime=1732375481058; duration=0sec 2024-11-23T15:24:41,103 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T15:24:41,103 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d6bd711ee7b1117306956b276de6b58d:A 2024-11-23T15:24:41,103 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T15:24:41,105 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37823 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T15:24:41,105 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HStore(1540): d6bd711ee7b1117306956b276de6b58d/C is initiating minor compaction (all files) 2024-11-23T15:24:41,105 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d6bd711ee7b1117306956b276de6b58d/C in TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d. 2024-11-23T15:24:41,105 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/e9fb149977634b15ad2f1fa5f21d2cd4, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/90e070b5a72c427087b7bd3b5c92c289, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/77dce7680f104a7ea031329ef42bb2ce] into tmpdir=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp, totalSize=36.9 K 2024-11-23T15:24:41,106 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.Compactor(224): Compacting e9fb149977634b15ad2f1fa5f21d2cd4, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=390, earliestPutTs=1732375476894 2024-11-23T15:24:41,106 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 90e070b5a72c427087b7bd3b5c92c289, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=414, earliestPutTs=1732375478037 2024-11-23T15:24:41,107 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 77dce7680f104a7ea031329ef42bb2ce, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=432, earliestPutTs=1732375479774 2024-11-23T15:24:41,115 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] 
throttle.PressureAwareThroughputController(145): d6bd711ee7b1117306956b276de6b58d#C#compaction#93 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T15:24:41,116 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/C/e7d9a5ffde2f4599aec715024eb94b10 is 50, key is test_row_0/C:col10/1732375479774/Put/seqid=0 2024-11-23T15:24:41,124 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073741932_1108 (size=13323) 2024-11-23T15:24:41,494 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/B/c4884e2e6fa442aeb9069551a5f4722b as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/c4884e2e6fa442aeb9069551a5f4722b 2024-11-23T15:24:41,499 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=453 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/A/c9e9eaad4992426991489f948b01f849 2024-11-23T15:24:41,502 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d6bd711ee7b1117306956b276de6b58d/B of d6bd711ee7b1117306956b276de6b58d into c4884e2e6fa442aeb9069551a5f4722b(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T15:24:41,502 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d6bd711ee7b1117306956b276de6b58d: 2024-11-23T15:24:41,502 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d., storeName=d6bd711ee7b1117306956b276de6b58d/B, priority=13, startTime=1732375481058; duration=0sec 2024-11-23T15:24:41,502 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T15:24:41,502 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d6bd711ee7b1117306956b276de6b58d:B 2024-11-23T15:24:41,507 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/B/852ae807d2574119afffc328d3e436a4 is 50, key is test_row_0/B:col10/1732375479788/Put/seqid=0 2024-11-23T15:24:41,511 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073741933_1109 (size=12301) 2024-11-23T15:24:41,530 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/C/e7d9a5ffde2f4599aec715024eb94b10 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/e7d9a5ffde2f4599aec715024eb94b10 2024-11-23T15:24:41,536 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d6bd711ee7b1117306956b276de6b58d/C of d6bd711ee7b1117306956b276de6b58d into e7d9a5ffde2f4599aec715024eb94b10(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
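At this point each of the three stores (A, B and C) has had its three HFiles rewritten by a minor compaction into a single file of about 13.0 K. These compactions were queued automatically after the flush, but the same work can be requested through the Admin API; the sketch below shows a minor request for the whole table and a major request for a single family (the methods are real Admin calls, while the connection handling and the choice of family are illustrative). The requests are queued on the region servers and executed asynchronously.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.util.Bytes;

public class CompactionRequestExample {
    static void requestCompactions(Connection conn) throws Exception {
        try (Admin admin = conn.getAdmin()) {
            TableName table = TableName.valueOf("TestAcidGuarantees");
            // Minor compaction across all column families of the table.
            admin.compact(table);
            // Major compaction of a single column family (rewrites every HFile of store A).
            admin.majorCompact(table, Bytes.toBytes("A"));
        }
    }
}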
2024-11-23T15:24:41,536 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d6bd711ee7b1117306956b276de6b58d: 2024-11-23T15:24:41,536 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d., storeName=d6bd711ee7b1117306956b276de6b58d/C, priority=13, startTime=1732375481058; duration=0sec 2024-11-23T15:24:41,536 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T15:24:41,536 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d6bd711ee7b1117306956b276de6b58d:C 2024-11-23T15:24:41,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-11-23T15:24:41,912 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=453 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/B/852ae807d2574119afffc328d3e436a4 2024-11-23T15:24:41,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(8581): Flush requested on d6bd711ee7b1117306956b276de6b58d 2024-11-23T15:24:41,913 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d. as already flushing 2024-11-23T15:24:41,924 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:41,924 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:41,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 211 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59132 deadline: 1732375541922, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:41,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 215 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59158 deadline: 1732375541922, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:41,924 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:41,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 202 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59180 deadline: 1732375541923, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:41,927 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:41,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 208 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59172 deadline: 1732375541924, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:41,927 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:41,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 211 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59196 deadline: 1732375541924, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:41,933 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/C/d346430b2ba5408589969cc7458224a9 is 50, key is test_row_0/C:col10/1732375479788/Put/seqid=0 2024-11-23T15:24:41,937 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073741934_1110 (size=12301) 2024-11-23T15:24:41,938 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=453 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/C/d346430b2ba5408589969cc7458224a9 2024-11-23T15:24:41,946 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/A/c9e9eaad4992426991489f948b01f849 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/c9e9eaad4992426991489f948b01f849 2024-11-23T15:24:41,952 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/c9e9eaad4992426991489f948b01f849, entries=150, sequenceid=453, filesize=12.0 K 2024-11-23T15:24:41,953 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/B/852ae807d2574119afffc328d3e436a4 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/852ae807d2574119afffc328d3e436a4 2024-11-23T15:24:41,959 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/852ae807d2574119afffc328d3e436a4, entries=150, sequenceid=453, filesize=12.0 K 2024-11-23T15:24:41,960 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/C/d346430b2ba5408589969cc7458224a9 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/d346430b2ba5408589969cc7458224a9 2024-11-23T15:24:41,967 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/d346430b2ba5408589969cc7458224a9, entries=150, sequenceid=453, filesize=12.0 K 2024-11-23T15:24:41,968 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=87.22 KB/89310 for d6bd711ee7b1117306956b276de6b58d in 890ms, sequenceid=453, compaction requested=false 2024-11-23T15:24:41,968 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(2538): Flush status journal for d6bd711ee7b1117306956b276de6b58d: 2024-11-23T15:24:41,968 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d. 
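The retried flush for pid=31 has now succeeded (about 120.76 KB written across the A, B and C stores at sequenceid=453), which is what lets the master mark the procedure done just below. The RegionTooBusyException bursts that continue afterwards are writer threads still colliding with the memstore limit between flushes. A minimal client-side sketch of backing off on that condition follows. Note that the stock client already retries RegionTooBusyException internally (governed by hbase.client.retries.number and hbase.client.pause), and whether application code sees the exception directly or wrapped in a retries-exhausted exception depends on those settings, so the cause-chain walk here is a best-effort assumption. The row, family and qualifier names are the ones appearing in this log.

import java.io.IOException;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BackoffWriter {
    /** Returns true if the failure is, or wraps, a RegionTooBusyException. */
    static boolean isRegionTooBusy(Throwable t) {
        for (Throwable cur = t; cur != null; cur = cur.getCause()) {
            if (cur instanceof RegionTooBusyException) {
                return true;
            }
        }
        return false;
    }

    // Sketch: a single-cell put that backs off and retries while the region reports it is over
    // its memstore limit, then gives up after maxAttempts tries.
    static void putWithBackoff(Connection conn, int maxAttempts) throws Exception {
        try (Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
            for (int attempt = 1; ; attempt++) {
                try {
                    table.put(put);
                    return;
                } catch (IOException e) {
                    if (!isRegionTooBusy(e) || attempt >= maxAttempts) {
                        throw e;
                    }
                    Thread.sleep(100L * attempt); // illustrative backoff; real clients use hbase.client.pause
                }
            }
        }
    }
}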
2024-11-23T15:24:41,968 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=31 2024-11-23T15:24:41,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4106): Remote procedure done, pid=31 2024-11-23T15:24:41,971 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=31, resume processing ppid=30 2024-11-23T15:24:41,971 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=31, ppid=30, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.2710 sec 2024-11-23T15:24:41,972 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=30, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=30, table=TestAcidGuarantees in 2.2770 sec 2024-11-23T15:24:42,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(8581): Flush requested on d6bd711ee7b1117306956b276de6b58d 2024-11-23T15:24:42,027 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d6bd711ee7b1117306956b276de6b58d 3/3 column families, dataSize=93.93 KB heapSize=246.84 KB 2024-11-23T15:24:42,029 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d6bd711ee7b1117306956b276de6b58d, store=A 2024-11-23T15:24:42,029 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:24:42,029 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d6bd711ee7b1117306956b276de6b58d, store=B 2024-11-23T15:24:42,029 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:24:42,029 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d6bd711ee7b1117306956b276de6b58d, store=C 2024-11-23T15:24:42,029 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:24:42,034 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/A/63347417d465484788a125b3e17fe081 is 50, key is test_row_0/A:col10/1732375481923/Put/seqid=0 2024-11-23T15:24:42,043 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073741935_1111 (size=12301) 2024-11-23T15:24:42,050 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:42,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 217 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59132 deadline: 1732375542045, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:42,050 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:42,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 212 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59172 deadline: 1732375542045, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:42,051 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:42,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 208 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59180 deadline: 1732375542047, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:42,051 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:42,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 222 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59158 deadline: 1732375542049, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:42,052 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:42,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 216 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59196 deadline: 1732375542049, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:42,151 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:42,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 219 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59132 deadline: 1732375542151, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:42,152 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:42,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 214 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59172 deadline: 1732375542151, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:42,153 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:42,153 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:42,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 224 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59158 deadline: 1732375542152, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:42,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 210 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59180 deadline: 1732375542152, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:42,154 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:42,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 218 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59196 deadline: 1732375542153, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:42,354 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:42,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 221 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59132 deadline: 1732375542353, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:42,354 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:42,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 216 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59172 deadline: 1732375542353, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:42,355 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:42,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 226 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59158 deadline: 1732375542354, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:42,355 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:42,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 212 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59180 deadline: 1732375542354, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:42,358 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:42,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 220 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59196 deadline: 1732375542356, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:42,444 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=474 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/A/63347417d465484788a125b3e17fe081 2024-11-23T15:24:42,455 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/B/9b41f9b3bc614ebd8cfce73664a65e08 is 50, key is test_row_0/B:col10/1732375481923/Put/seqid=0 2024-11-23T15:24:42,460 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073741936_1112 (size=12301) 2024-11-23T15:24:42,657 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:42,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 223 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59132 deadline: 1732375542655, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:42,657 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:42,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 218 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59172 deadline: 1732375542656, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:42,657 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:42,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 228 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59158 deadline: 1732375542657, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:42,659 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:42,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 214 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59180 deadline: 1732375542658, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:42,661 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:42,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 222 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59196 deadline: 1732375542660, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:42,861 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=474 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/B/9b41f9b3bc614ebd8cfce73664a65e08 2024-11-23T15:24:42,869 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/C/7bf4d2d33b1b4bb692f551b26ccaead6 is 50, key is test_row_0/C:col10/1732375481923/Put/seqid=0 2024-11-23T15:24:42,890 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073741937_1113 (size=12301) 2024-11-23T15:24:43,159 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:43,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 220 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59172 deadline: 1732375543158, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:43,160 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:43,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 230 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59158 deadline: 1732375543158, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:43,162 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:43,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 216 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59180 deadline: 1732375543161, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:43,164 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:43,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 225 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59132 deadline: 1732375543164, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:43,166 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:43,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 224 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59196 deadline: 1732375543165, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:43,291 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=474 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/C/7bf4d2d33b1b4bb692f551b26ccaead6 2024-11-23T15:24:43,297 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/A/63347417d465484788a125b3e17fe081 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/63347417d465484788a125b3e17fe081 2024-11-23T15:24:43,302 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/63347417d465484788a125b3e17fe081, entries=150, sequenceid=474, filesize=12.0 K 2024-11-23T15:24:43,304 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/B/9b41f9b3bc614ebd8cfce73664a65e08 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/9b41f9b3bc614ebd8cfce73664a65e08 2024-11-23T15:24:43,308 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/9b41f9b3bc614ebd8cfce73664a65e08, entries=150, sequenceid=474, filesize=12.0 K 2024-11-23T15:24:43,310 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/C/7bf4d2d33b1b4bb692f551b26ccaead6 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/7bf4d2d33b1b4bb692f551b26ccaead6 2024-11-23T15:24:43,317 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/7bf4d2d33b1b4bb692f551b26ccaead6, entries=150, sequenceid=474, filesize=12.0 K 2024-11-23T15:24:43,319 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~100.63 KB/103050, heapSize ~264.38 KB/270720, currentSize=107.34 KB/109920 for d6bd711ee7b1117306956b276de6b58d in 1292ms, sequenceid=474, compaction requested=true 2024-11-23T15:24:43,319 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d6bd711ee7b1117306956b276de6b58d: 2024-11-23T15:24:43,319 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d6bd711ee7b1117306956b276de6b58d:A, priority=-2147483648, current under compaction store size is 1 2024-11-23T15:24:43,319 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T15:24:43,319 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T15:24:43,319 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d6bd711ee7b1117306956b276de6b58d:B, priority=-2147483648, current under compaction store size is 2 2024-11-23T15:24:43,319 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T15:24:43,319 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d6bd711ee7b1117306956b276de6b58d:C, priority=-2147483648, current under compaction store size is 3 2024-11-23T15:24:43,319 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T15:24:43,319 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T15:24:43,321 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37925 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T15:24:43,321 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HStore(1540): d6bd711ee7b1117306956b276de6b58d/A is initiating minor 
compaction (all files) 2024-11-23T15:24:43,321 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d6bd711ee7b1117306956b276de6b58d/A in TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d. 2024-11-23T15:24:43,321 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/0c1c674cedae4bea96b286d0ccc0155d, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/c9e9eaad4992426991489f948b01f849, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/63347417d465484788a125b3e17fe081] into tmpdir=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp, totalSize=37.0 K 2024-11-23T15:24:43,322 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37925 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T15:24:43,322 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1540): d6bd711ee7b1117306956b276de6b58d/B is initiating minor compaction (all files) 2024-11-23T15:24:43,322 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d6bd711ee7b1117306956b276de6b58d/B in TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d. 
2024-11-23T15:24:43,322 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/c4884e2e6fa442aeb9069551a5f4722b, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/852ae807d2574119afffc328d3e436a4, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/9b41f9b3bc614ebd8cfce73664a65e08] into tmpdir=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp, totalSize=37.0 K 2024-11-23T15:24:43,322 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0c1c674cedae4bea96b286d0ccc0155d, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=432, earliestPutTs=1732375479774 2024-11-23T15:24:43,323 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting c4884e2e6fa442aeb9069551a5f4722b, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=432, earliestPutTs=1732375479774 2024-11-23T15:24:43,323 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.Compactor(224): Compacting c9e9eaad4992426991489f948b01f849, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=453, earliestPutTs=1732375479788 2024-11-23T15:24:43,323 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting 852ae807d2574119afffc328d3e436a4, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=453, earliestPutTs=1732375479788 2024-11-23T15:24:43,323 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 63347417d465484788a125b3e17fe081, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=474, earliestPutTs=1732375481922 2024-11-23T15:24:43,324 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting 9b41f9b3bc614ebd8cfce73664a65e08, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=474, earliestPutTs=1732375481922 2024-11-23T15:24:43,334 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d6bd711ee7b1117306956b276de6b58d#B#compaction#99 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-23T15:24:43,335 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d6bd711ee7b1117306956b276de6b58d#A#compaction#100 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T15:24:43,335 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/B/860d44b0d25f4a01bf218fe20ae0eba6 is 50, key is test_row_0/B:col10/1732375481923/Put/seqid=0 2024-11-23T15:24:43,335 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/A/b4f658f77caf44a3af209f53d92c1b2a is 50, key is test_row_0/A:col10/1732375481923/Put/seqid=0 2024-11-23T15:24:43,345 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073741938_1114 (size=13425) 2024-11-23T15:24:43,349 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073741939_1115 (size=13425) 2024-11-23T15:24:43,356 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/A/b4f658f77caf44a3af209f53d92c1b2a as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/b4f658f77caf44a3af209f53d92c1b2a 2024-11-23T15:24:43,357 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/B/860d44b0d25f4a01bf218fe20ae0eba6 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/860d44b0d25f4a01bf218fe20ae0eba6 2024-11-23T15:24:43,364 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d6bd711ee7b1117306956b276de6b58d/B of d6bd711ee7b1117306956b276de6b58d into 860d44b0d25f4a01bf218fe20ae0eba6(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T15:24:43,364 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d6bd711ee7b1117306956b276de6b58d: 2024-11-23T15:24:43,364 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d., storeName=d6bd711ee7b1117306956b276de6b58d/B, priority=13, startTime=1732375483319; duration=0sec 2024-11-23T15:24:43,364 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T15:24:43,364 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d6bd711ee7b1117306956b276de6b58d:B 2024-11-23T15:24:43,364 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T15:24:43,365 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d6bd711ee7b1117306956b276de6b58d/A of d6bd711ee7b1117306956b276de6b58d into b4f658f77caf44a3af209f53d92c1b2a(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T15:24:43,365 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d6bd711ee7b1117306956b276de6b58d: 2024-11-23T15:24:43,365 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d., storeName=d6bd711ee7b1117306956b276de6b58d/A, priority=13, startTime=1732375483319; duration=0sec 2024-11-23T15:24:43,365 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T15:24:43,365 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d6bd711ee7b1117306956b276de6b58d:A 2024-11-23T15:24:43,367 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37925 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T15:24:43,367 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1540): d6bd711ee7b1117306956b276de6b58d/C is initiating minor compaction (all files) 2024-11-23T15:24:43,367 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d6bd711ee7b1117306956b276de6b58d/C in TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d. 
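Annotation: the "Exploring compaction algorithm has selected 3 files of size 37925" entry above comes from HBase's ExploringCompactionPolicy. The toy selector below only illustrates the general ratio idea, namely keeping a contiguous run of store files in which no single file dwarfs the others combined; it is a simplification for illustration, not the real policy, and the class name and sizes are assumptions.

```java
import java.util.ArrayList;
import java.util.List;

// Toy ratio-based selection (NOT HBase's ExploringCompactionPolicy): accept the first
// contiguous run of store files, oldest first, in which no single file is more than
// `ratio` times the combined size of the other files in the run.
public final class ToyCompactionSelector {
    public static List<Long> select(List<Long> sizesOldestFirst, double ratio) {
        int n = sizesOldestFirst.size();
        for (int start = 0; start < n - 1; start++) {
            List<Long> candidate = sizesOldestFirst.subList(start, n);
            long total = candidate.stream().mapToLong(Long::longValue).sum();
            boolean ok = true;
            for (long size : candidate) {
                // Reject the run if one file is much larger than the rest combined.
                if (size > ratio * (total - size)) {
                    ok = false;
                    break;
                }
            }
            if (ok) {
                return new ArrayList<>(candidate);
            }
        }
        return new ArrayList<>();
    }

    public static void main(String[] args) {
        // Sizes roughly matching the three store files above (13.0 K, 12.0 K, 12.0 K).
        System.out.println(select(List.of(13_312L, 12_288L, 12_288L), 1.2));
    }
}
```

Run on sizes close to the three files listed above, the toy selector keeps all three, just as the log's selection of "3 files" does.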
2024-11-23T15:24:43,367 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/e7d9a5ffde2f4599aec715024eb94b10, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/d346430b2ba5408589969cc7458224a9, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/7bf4d2d33b1b4bb692f551b26ccaead6] into tmpdir=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp, totalSize=37.0 K 2024-11-23T15:24:43,368 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting e7d9a5ffde2f4599aec715024eb94b10, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=432, earliestPutTs=1732375479774 2024-11-23T15:24:43,368 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting d346430b2ba5408589969cc7458224a9, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=453, earliestPutTs=1732375479788 2024-11-23T15:24:43,369 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting 7bf4d2d33b1b4bb692f551b26ccaead6, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=474, earliestPutTs=1732375481922 2024-11-23T15:24:43,380 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d6bd711ee7b1117306956b276de6b58d#C#compaction#101 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T15:24:43,381 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/C/3e1832b29d654824993838b668f28e9c is 50, key is test_row_0/C:col10/1732375481923/Put/seqid=0 2024-11-23T15:24:43,388 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073741940_1116 (size=13425) 2024-11-23T15:24:43,794 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/C/3e1832b29d654824993838b668f28e9c as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/3e1832b29d654824993838b668f28e9c 2024-11-23T15:24:43,800 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d6bd711ee7b1117306956b276de6b58d/C of d6bd711ee7b1117306956b276de6b58d into 3e1832b29d654824993838b668f28e9c(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
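Annotation: the "Committing ...

/.tmp/C/3e1832b29d654824993838b668f28e9c as .../C/3e1832b29d654824993838b668f28e9c" entries above show the write-under-.tmp-then-rename pattern, so readers never observe a half-written HFile. The sketch below reproduces only that pattern with the plain Hadoop FileSystem API; it is not HBase's HRegionFileSystem code, and the class name and paths are illustrative placeholders.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Minimal sketch of the .tmp-then-rename commit pattern visible in the log above.
public final class TmpThenRenameCommit {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();      // picks up fs.defaultFS (HDFS or local FS)
        FileSystem fs = FileSystem.get(conf);

        Path tmpFile   = new Path("example/TestTable/region/.tmp/C/newfile");
        Path storeFile = new Path("example/TestTable/region/C/newfile");

        // 1. Write the new file under the region's .tmp directory first.
        try (FSDataOutputStream out = fs.create(tmpFile, true)) {
            out.writeBytes("compacted contents would go here");
        }

        // 2. Commit by renaming into the store directory; check the result because
        //    rename signals failure via its boolean return value.
        fs.mkdirs(storeFile.getParent());
        if (!fs.rename(tmpFile, storeFile)) {
            throw new java.io.IOException("Failed to commit " + tmpFile + " as " + storeFile);
        }
    }
}
```

On HDFS the rename in step 2 is a metadata-only operation within one filesystem, which is what makes this commit step cheap and safe relative to rewriting the data.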
2024-11-23T15:24:43,800 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d6bd711ee7b1117306956b276de6b58d: 2024-11-23T15:24:43,800 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d., storeName=d6bd711ee7b1117306956b276de6b58d/C, priority=13, startTime=1732375483319; duration=0sec 2024-11-23T15:24:43,801 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T15:24:43,801 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d6bd711ee7b1117306956b276de6b58d:C 2024-11-23T15:24:43,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-11-23T15:24:43,801 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 30 completed 2024-11-23T15:24:43,802 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-23T15:24:43,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] procedure2.ProcedureExecutor(1098): Stored pid=32, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=32, table=TestAcidGuarantees 2024-11-23T15:24:43,804 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=32, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=32, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-23T15:24:43,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=32 2024-11-23T15:24:43,805 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=32, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=32, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-23T15:24:43,805 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=33, ppid=32, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-23T15:24:43,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=32 2024-11-23T15:24:43,956 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:24:43,957 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=33 2024-11-23T15:24:43,957 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d. 
2024-11-23T15:24:43,957 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegion(2837): Flushing d6bd711ee7b1117306956b276de6b58d 3/3 column families, dataSize=107.34 KB heapSize=282 KB 2024-11-23T15:24:43,957 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d6bd711ee7b1117306956b276de6b58d, store=A 2024-11-23T15:24:43,958 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:24:43,958 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d6bd711ee7b1117306956b276de6b58d, store=B 2024-11-23T15:24:43,958 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:24:43,958 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d6bd711ee7b1117306956b276de6b58d, store=C 2024-11-23T15:24:43,958 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:24:43,963 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/A/30208c6f6c174bd79eefcde08a7b9447 is 50, key is test_row_0/A:col10/1732375482040/Put/seqid=0 2024-11-23T15:24:43,970 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073741941_1117 (size=12301) 2024-11-23T15:24:43,970 DEBUG [Thread-164 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2c8de680 to 127.0.0.1:62881 2024-11-23T15:24:43,970 DEBUG [Thread-166 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6f6b07e3 to 127.0.0.1:62881 2024-11-23T15:24:43,970 DEBUG [Thread-160 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x72e97e4b to 127.0.0.1:62881 2024-11-23T15:24:43,970 DEBUG [Thread-162 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x490457fd to 127.0.0.1:62881 2024-11-23T15:24:43,971 DEBUG [Thread-164 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T15:24:43,971 DEBUG [Thread-166 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T15:24:43,971 DEBUG [Thread-160 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T15:24:43,971 DEBUG [Thread-162 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T15:24:44,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=32 2024-11-23T15:24:44,167 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d. 
as already flushing 2024-11-23T15:24:44,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(8581): Flush requested on d6bd711ee7b1117306956b276de6b58d 2024-11-23T15:24:44,167 DEBUG [Thread-153 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x04977266 to 127.0.0.1:62881 2024-11-23T15:24:44,168 DEBUG [Thread-153 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T15:24:44,171 DEBUG [Thread-155 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6bbb5d8a to 127.0.0.1:62881 2024-11-23T15:24:44,171 DEBUG [Thread-149 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x12885408 to 127.0.0.1:62881 2024-11-23T15:24:44,172 DEBUG [Thread-155 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T15:24:44,172 DEBUG [Thread-149 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T15:24:44,174 DEBUG [Thread-151 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x72b32f98 to 127.0.0.1:62881 2024-11-23T15:24:44,174 DEBUG [Thread-151 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T15:24:44,176 DEBUG [Thread-157 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x18603bb9 to 127.0.0.1:62881 2024-11-23T15:24:44,176 DEBUG [Thread-157 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T15:24:44,370 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=496 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/A/30208c6f6c174bd79eefcde08a7b9447 2024-11-23T15:24:44,377 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/B/a94468d33ca7485bb369f15ab63f77d8 is 50, key is test_row_0/B:col10/1732375482040/Put/seqid=0 2024-11-23T15:24:44,381 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073741942_1118 (size=12301) 2024-11-23T15:24:44,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=32 2024-11-23T15:24:44,782 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=496 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/B/a94468d33ca7485bb369f15ab63f77d8 2024-11-23T15:24:44,790 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/C/248333e177f042a3a5f65b3107d6b3ea is 50, key is test_row_0/C:col10/1732375482040/Put/seqid=0 2024-11-23T15:24:44,794 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073741943_1119 (size=12301) 
2024-11-23T15:24:44,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=32 2024-11-23T15:24:45,195 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=496 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/C/248333e177f042a3a5f65b3107d6b3ea 2024-11-23T15:24:45,200 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/A/30208c6f6c174bd79eefcde08a7b9447 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/30208c6f6c174bd79eefcde08a7b9447 2024-11-23T15:24:45,205 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/30208c6f6c174bd79eefcde08a7b9447, entries=150, sequenceid=496, filesize=12.0 K 2024-11-23T15:24:45,206 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/B/a94468d33ca7485bb369f15ab63f77d8 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/a94468d33ca7485bb369f15ab63f77d8 2024-11-23T15:24:45,209 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/a94468d33ca7485bb369f15ab63f77d8, entries=150, sequenceid=496, filesize=12.0 K 2024-11-23T15:24:45,210 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/C/248333e177f042a3a5f65b3107d6b3ea as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/248333e177f042a3a5f65b3107d6b3ea 2024-11-23T15:24:45,214 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/248333e177f042a3a5f65b3107d6b3ea, entries=150, sequenceid=496, filesize=12.0 K 2024-11-23T15:24:45,215 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] 
regionserver.HRegion(3040): Finished flush of dataSize ~107.34 KB/109920, heapSize ~281.95 KB/288720, currentSize=33.54 KB/34350 for d6bd711ee7b1117306956b276de6b58d in 1258ms, sequenceid=496, compaction requested=false 2024-11-23T15:24:45,215 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegion(2538): Flush status journal for d6bd711ee7b1117306956b276de6b58d: 2024-11-23T15:24:45,215 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d. 2024-11-23T15:24:45,215 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=33 2024-11-23T15:24:45,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4106): Remote procedure done, pid=33 2024-11-23T15:24:45,217 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=33, resume processing ppid=32 2024-11-23T15:24:45,217 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=33, ppid=32, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4110 sec 2024-11-23T15:24:45,218 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=32, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=32, table=TestAcidGuarantees in 1.4150 sec 2024-11-23T15:24:45,401 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-23T15:24:45,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=32 2024-11-23T15:24:45,909 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 32 completed 2024-11-23T15:24:45,909 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. 
Writers: 2024-11-23T15:24:45,909 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 76 2024-11-23T15:24:45,909 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 81 2024-11-23T15:24:45,909 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 72 2024-11-23T15:24:45,909 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 86 2024-11-23T15:24:45,909 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 80 2024-11-23T15:24:45,909 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-11-23T15:24:45,909 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 6780 2024-11-23T15:24:45,909 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 6736 2024-11-23T15:24:45,909 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-11-23T15:24:45,909 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2953 2024-11-23T15:24:45,909 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 8858 rows 2024-11-23T15:24:45,909 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2966 2024-11-23T15:24:45,909 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 8897 rows 2024-11-23T15:24:45,909 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-11-23T15:24:45,909 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0e98ea32 to 127.0.0.1:62881 2024-11-23T15:24:45,909 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T15:24:45,915 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-11-23T15:24:45,919 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-11-23T15:24:45,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] procedure2.ProcedureExecutor(1098): Stored pid=34, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-11-23T15:24:45,926 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732375485925"}]},"ts":"1732375485925"} 2024-11-23T15:24:45,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=34 2024-11-23T15:24:45,927 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-11-23T15:24:45,929 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-11-23T15:24:45,930 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=35, ppid=34, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-23T15:24:45,934 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=36, ppid=35, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=d6bd711ee7b1117306956b276de6b58d, UNASSIGN}] 2024-11-23T15:24:45,935 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=36, ppid=35, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure 
table=TestAcidGuarantees, region=d6bd711ee7b1117306956b276de6b58d, UNASSIGN 2024-11-23T15:24:45,936 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=36 updating hbase:meta row=d6bd711ee7b1117306956b276de6b58d, regionState=CLOSING, regionLocation=6a36843bf905,33811,1732375456985 2024-11-23T15:24:45,937 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-23T15:24:45,937 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=37, ppid=36, state=RUNNABLE; CloseRegionProcedure d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985}] 2024-11-23T15:24:46,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=34 2024-11-23T15:24:46,091 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:24:46,093 INFO [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] handler.UnassignRegionHandler(124): Close d6bd711ee7b1117306956b276de6b58d 2024-11-23T15:24:46,093 DEBUG [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-23T15:24:46,094 DEBUG [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.HRegion(1681): Closing d6bd711ee7b1117306956b276de6b58d, disabling compactions & flushes 2024-11-23T15:24:46,094 INFO [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d. 2024-11-23T15:24:46,094 DEBUG [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d. 2024-11-23T15:24:46,094 DEBUG [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d. after waiting 0 ms 2024-11-23T15:24:46,094 DEBUG [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d. 
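Annotation: the sequence recorded above (FlushTableProcedure pid=32 with its FlushRegionProcedure child completing, then "Started disable of TestAcidGuarantees" and the DisableTableProcedure with TransitRegionStateProcedure/CloseRegionProcedure children) is driven by two client-side calls. The sketch below shows those calls with the standard HBase Admin API; the class name is hypothetical, the table name is taken from the log, and connection settings are assumed to come from hbase-site.xml on the classpath.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

// Client-side counterpart of the flush/disable procedures visible in the log above.
public final class FlushThenDisable {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();   // reads hbase-site.xml / ZK quorum
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
            TableName table = TableName.valueOf("TestAcidGuarantees");
            admin.flush(table);          // master runs a flush-table procedure
            admin.disableTable(table);   // master runs DisableTableProcedure; regions are closed
        }
    }
}
```

In this log the client waits for each procedure to finish (the repeated "Checking to see if procedure is done pid=32/pid=34" polling and the "Operation: FLUSH ... completed" line), and the disable is what triggers the region close and the store-file archiving that follow.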
2024-11-23T15:24:46,094 INFO [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.HRegion(2837): Flushing d6bd711ee7b1117306956b276de6b58d 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-11-23T15:24:46,095 DEBUG [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d6bd711ee7b1117306956b276de6b58d, store=A 2024-11-23T15:24:46,095 DEBUG [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:24:46,095 DEBUG [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d6bd711ee7b1117306956b276de6b58d, store=B 2024-11-23T15:24:46,095 DEBUG [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:24:46,095 DEBUG [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d6bd711ee7b1117306956b276de6b58d, store=C 2024-11-23T15:24:46,095 DEBUG [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:24:46,099 DEBUG [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/A/c9b8f1ec3b844b13a170a3cf43fff114 is 50, key is test_row_1/A:col10/1732375484173/Put/seqid=0 2024-11-23T15:24:46,103 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073741944_1120 (size=9857) 2024-11-23T15:24:46,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=34 2024-11-23T15:24:46,504 INFO [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=504 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/A/c9b8f1ec3b844b13a170a3cf43fff114 2024-11-23T15:24:46,514 DEBUG [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/B/0d4e6463017f42b68b53c44b76bf88dd is 50, key is test_row_1/B:col10/1732375484173/Put/seqid=0 2024-11-23T15:24:46,517 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073741945_1121 (size=9857) 2024-11-23T15:24:46,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=34 2024-11-23T15:24:46,918 INFO [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 
{event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=504 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/B/0d4e6463017f42b68b53c44b76bf88dd 2024-11-23T15:24:46,926 DEBUG [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/C/b850829858f2413381cddaaf53731dde is 50, key is test_row_1/C:col10/1732375484173/Put/seqid=0 2024-11-23T15:24:46,929 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073741946_1122 (size=9857) 2024-11-23T15:24:47,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=34 2024-11-23T15:24:47,330 INFO [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=504 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/C/b850829858f2413381cddaaf53731dde 2024-11-23T15:24:47,335 DEBUG [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/A/c9b8f1ec3b844b13a170a3cf43fff114 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/c9b8f1ec3b844b13a170a3cf43fff114 2024-11-23T15:24:47,340 INFO [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/c9b8f1ec3b844b13a170a3cf43fff114, entries=100, sequenceid=504, filesize=9.6 K 2024-11-23T15:24:47,340 DEBUG [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/B/0d4e6463017f42b68b53c44b76bf88dd as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/0d4e6463017f42b68b53c44b76bf88dd 2024-11-23T15:24:47,344 INFO [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/0d4e6463017f42b68b53c44b76bf88dd, entries=100, sequenceid=504, filesize=9.6 K 2024-11-23T15:24:47,345 DEBUG [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.HRegionFileSystem(442): 
Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/.tmp/C/b850829858f2413381cddaaf53731dde as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/b850829858f2413381cddaaf53731dde 2024-11-23T15:24:47,349 INFO [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/b850829858f2413381cddaaf53731dde, entries=100, sequenceid=504, filesize=9.6 K 2024-11-23T15:24:47,350 INFO [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=0 B/0 for d6bd711ee7b1117306956b276de6b58d in 1256ms, sequenceid=504, compaction requested=true 2024-11-23T15:24:47,350 DEBUG [StoreCloser-TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/e579954fcc08481095cb741b433be2a4, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/ee891497a652468087b83876b6c44b7b, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/912b7f0cf1d942038d553477fdb845bc, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/26c11edb4773456eb633defb4885fe9e, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/018dd68209954569b79bc260523b9d24, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/bf71f2056e424d7e8978d648f086ab93, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/3c816e3608bf447d953f8a35abdb9934, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/288940ce4f7644058181039831910ec8, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/f7da8f646972432aa4b1918f8ea6f70c, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/5595114e44624010a6316ddc252afafd, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/32b1e44119704d4fa575ab5bfe07f9c4, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/e0749eb2f0754d45b428588ce75e7545, 
hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/cb3479e8a05b4f8ba315e9d17f9728ab, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/0f24632235464f9bad380ab1194c90c6, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/f45bccbe5d074cf7bcb8dd8aeb5a8b6e, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/5f008df8e1ed4e6c800e2cfe09601c0f, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/d1457bc94fbb48b48d6d568d3f73a6ed, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/ded405d2a4214442988161c923a3e570, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/e80ab0ae06144da4ba9a402305f7775c, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/4a4c069a6a2c425c8b3751d4ed88e556, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/379b1d66f4764d418273e5b7087deeef, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/bbfd6d1a4dc7414d9332939e5a854da4, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/174adfd0ef914bcda4810a7f1e7e1685, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/a77b70daf51d48149a1859bd35c45a06, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/fd7d6725596c4afabe6b19e933aeca0b, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/8b6ff5ab9d774c34811cc1022b95c156, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/c792d4cf10ba492e8da0c5603456881a, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/7e80475c2e404fa2b391a21610935b90, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/c4ff81ee4fff49348ba117b9dac81c79, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/0c1c674cedae4bea96b286d0ccc0155d, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/489133c465e748dc9f46b9b02adcbdc6, 
hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/c9e9eaad4992426991489f948b01f849, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/63347417d465484788a125b3e17fe081] to archive 2024-11-23T15:24:47,353 DEBUG [StoreCloser-TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-23T15:24:47,359 DEBUG [StoreCloser-TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/e579954fcc08481095cb741b433be2a4 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/e579954fcc08481095cb741b433be2a4 2024-11-23T15:24:47,360 DEBUG [StoreCloser-TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/ee891497a652468087b83876b6c44b7b to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/ee891497a652468087b83876b6c44b7b 2024-11-23T15:24:47,361 DEBUG [StoreCloser-TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/912b7f0cf1d942038d553477fdb845bc to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/912b7f0cf1d942038d553477fdb845bc 2024-11-23T15:24:47,363 DEBUG [StoreCloser-TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/26c11edb4773456eb633defb4885fe9e to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/26c11edb4773456eb633defb4885fe9e 2024-11-23T15:24:47,364 DEBUG [StoreCloser-TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/018dd68209954569b79bc260523b9d24 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/018dd68209954569b79bc260523b9d24 2024-11-23T15:24:47,365 DEBUG [StoreCloser-TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/bf71f2056e424d7e8978d648f086ab93 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/bf71f2056e424d7e8978d648f086ab93 2024-11-23T15:24:47,367 DEBUG [StoreCloser-TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/3c816e3608bf447d953f8a35abdb9934 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/3c816e3608bf447d953f8a35abdb9934 2024-11-23T15:24:47,368 DEBUG [StoreCloser-TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/288940ce4f7644058181039831910ec8 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/288940ce4f7644058181039831910ec8 2024-11-23T15:24:47,369 DEBUG [StoreCloser-TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/f7da8f646972432aa4b1918f8ea6f70c to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/f7da8f646972432aa4b1918f8ea6f70c 2024-11-23T15:24:47,370 DEBUG [StoreCloser-TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/5595114e44624010a6316ddc252afafd to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/5595114e44624010a6316ddc252afafd 2024-11-23T15:24:47,371 DEBUG [StoreCloser-TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/32b1e44119704d4fa575ab5bfe07f9c4 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/32b1e44119704d4fa575ab5bfe07f9c4 2024-11-23T15:24:47,372 DEBUG [StoreCloser-TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/e0749eb2f0754d45b428588ce75e7545 to 
hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/e0749eb2f0754d45b428588ce75e7545 2024-11-23T15:24:47,373 DEBUG [StoreCloser-TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/cb3479e8a05b4f8ba315e9d17f9728ab to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/cb3479e8a05b4f8ba315e9d17f9728ab 2024-11-23T15:24:47,375 DEBUG [StoreCloser-TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/0f24632235464f9bad380ab1194c90c6 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/0f24632235464f9bad380ab1194c90c6 2024-11-23T15:24:47,376 DEBUG [StoreCloser-TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/f45bccbe5d074cf7bcb8dd8aeb5a8b6e to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/f45bccbe5d074cf7bcb8dd8aeb5a8b6e 2024-11-23T15:24:47,378 DEBUG [StoreCloser-TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/5f008df8e1ed4e6c800e2cfe09601c0f to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/5f008df8e1ed4e6c800e2cfe09601c0f 2024-11-23T15:24:47,379 DEBUG [StoreCloser-TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/d1457bc94fbb48b48d6d568d3f73a6ed to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/d1457bc94fbb48b48d6d568d3f73a6ed 2024-11-23T15:24:47,380 DEBUG [StoreCloser-TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/ded405d2a4214442988161c923a3e570 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/ded405d2a4214442988161c923a3e570 2024-11-23T15:24:47,382 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/e80ab0ae06144da4ba9a402305f7775c to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/e80ab0ae06144da4ba9a402305f7775c 2024-11-23T15:24:47,383 DEBUG [StoreCloser-TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/4a4c069a6a2c425c8b3751d4ed88e556 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/4a4c069a6a2c425c8b3751d4ed88e556 2024-11-23T15:24:47,384 DEBUG [StoreCloser-TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/379b1d66f4764d418273e5b7087deeef to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/379b1d66f4764d418273e5b7087deeef 2024-11-23T15:24:47,386 DEBUG [StoreCloser-TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/bbfd6d1a4dc7414d9332939e5a854da4 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/bbfd6d1a4dc7414d9332939e5a854da4 2024-11-23T15:24:47,387 DEBUG [StoreCloser-TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/174adfd0ef914bcda4810a7f1e7e1685 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/174adfd0ef914bcda4810a7f1e7e1685 2024-11-23T15:24:47,388 DEBUG [StoreCloser-TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/a77b70daf51d48149a1859bd35c45a06 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/a77b70daf51d48149a1859bd35c45a06 2024-11-23T15:24:47,389 DEBUG [StoreCloser-TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/fd7d6725596c4afabe6b19e933aeca0b to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/fd7d6725596c4afabe6b19e933aeca0b 2024-11-23T15:24:47,391 DEBUG [StoreCloser-TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/8b6ff5ab9d774c34811cc1022b95c156 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/8b6ff5ab9d774c34811cc1022b95c156 2024-11-23T15:24:47,392 DEBUG [StoreCloser-TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/c792d4cf10ba492e8da0c5603456881a to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/c792d4cf10ba492e8da0c5603456881a 2024-11-23T15:24:47,393 DEBUG [StoreCloser-TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/7e80475c2e404fa2b391a21610935b90 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/7e80475c2e404fa2b391a21610935b90 2024-11-23T15:24:47,394 DEBUG [StoreCloser-TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/c4ff81ee4fff49348ba117b9dac81c79 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/c4ff81ee4fff49348ba117b9dac81c79 2024-11-23T15:24:47,395 DEBUG [StoreCloser-TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/0c1c674cedae4bea96b286d0ccc0155d to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/0c1c674cedae4bea96b286d0ccc0155d 2024-11-23T15:24:47,397 DEBUG [StoreCloser-TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/489133c465e748dc9f46b9b02adcbdc6 to 
hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/489133c465e748dc9f46b9b02adcbdc6 2024-11-23T15:24:47,398 DEBUG [StoreCloser-TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/c9e9eaad4992426991489f948b01f849 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/c9e9eaad4992426991489f948b01f849 2024-11-23T15:24:47,399 DEBUG [StoreCloser-TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/63347417d465484788a125b3e17fe081 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/63347417d465484788a125b3e17fe081 2024-11-23T15:24:47,413 DEBUG [StoreCloser-TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/ddb667c32fef4d9682377b5f2988ca2e, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/35941dfec8c24b19b14632f52495c6be, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/83e0108851094f4481c52372dbddb08b, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/9e712ac02d7846768861d28baf8dd11f, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/cc769daab3164dc6a1ef008fe117679c, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/fc82dfb2bdad4139864ac1c331588b11, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/32324e433d5c4e5d86a36709f9687023, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/0b2b1451ba524ff3a891b43e3e1ca7e7, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/4ca52b70305c4b78bb9c03679a78d73e, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/3d724afe56f545578d002449e9a74ee4, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/3dfde1948e3841dd9c882f7bf96ced7b, 
hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/7f5fa19be31d4d078338a9db7780371a, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/235d6ee79118430ebc1d437d12869347, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/bdcd6af87a774e3894b6d14b02cd1069, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/3dac953d3143491a963460ea4499c8aa, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/e398089425714411a5446fc7b8ba951b, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/f44783d19a104490b1a9a0a4a1c56624, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/7209624e503e44279fb5fef3198d40ea, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/22e456cb42484fbea5f9a7082fc4171c, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/f1a99b6c1a274279ab49d3b63c4435a8, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/28d70fcecbfc48d2b32d856377bab704, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/efe2c3ee5afc4ee2b989e9e824a127af, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/b130dd77da2b4c9981cc33ff82d617dc, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/746faef8e3864cc1880465bb012a61d3, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/825c008af7594132a39c31e2d4354f93, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/c5df623b9d1646afb89cad0b7019e15f, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/db56faea3b8244208739d33d644422b4, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/daa8143dac9d4ac8aed18cd93f559b27, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/6d76a9c690b7492e9601f7a430c6f155, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/c4884e2e6fa442aeb9069551a5f4722b, 
hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/d688b5d3816547c5a1a5974523a4bb83, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/852ae807d2574119afffc328d3e436a4, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/9b41f9b3bc614ebd8cfce73664a65e08] to archive 2024-11-23T15:24:47,414 DEBUG [StoreCloser-TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-23T15:24:47,416 DEBUG [StoreCloser-TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/ddb667c32fef4d9682377b5f2988ca2e to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/ddb667c32fef4d9682377b5f2988ca2e 2024-11-23T15:24:47,417 DEBUG [StoreCloser-TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/35941dfec8c24b19b14632f52495c6be to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/35941dfec8c24b19b14632f52495c6be 2024-11-23T15:24:47,418 DEBUG [StoreCloser-TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/83e0108851094f4481c52372dbddb08b to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/83e0108851094f4481c52372dbddb08b 2024-11-23T15:24:47,419 DEBUG [StoreCloser-TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/9e712ac02d7846768861d28baf8dd11f to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/9e712ac02d7846768861d28baf8dd11f 2024-11-23T15:24:47,420 DEBUG [StoreCloser-TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/cc769daab3164dc6a1ef008fe117679c to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/cc769daab3164dc6a1ef008fe117679c 2024-11-23T15:24:47,421 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/fc82dfb2bdad4139864ac1c331588b11 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/fc82dfb2bdad4139864ac1c331588b11 2024-11-23T15:24:47,423 DEBUG [StoreCloser-TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/32324e433d5c4e5d86a36709f9687023 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/32324e433d5c4e5d86a36709f9687023 2024-11-23T15:24:47,424 DEBUG [StoreCloser-TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/0b2b1451ba524ff3a891b43e3e1ca7e7 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/0b2b1451ba524ff3a891b43e3e1ca7e7 2024-11-23T15:24:47,425 DEBUG [StoreCloser-TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/4ca52b70305c4b78bb9c03679a78d73e to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/4ca52b70305c4b78bb9c03679a78d73e 2024-11-23T15:24:47,426 DEBUG [StoreCloser-TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/3d724afe56f545578d002449e9a74ee4 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/3d724afe56f545578d002449e9a74ee4 2024-11-23T15:24:47,427 DEBUG [StoreCloser-TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/3dfde1948e3841dd9c882f7bf96ced7b to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/3dfde1948e3841dd9c882f7bf96ced7b 2024-11-23T15:24:47,428 DEBUG [StoreCloser-TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/7f5fa19be31d4d078338a9db7780371a to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/7f5fa19be31d4d078338a9db7780371a 2024-11-23T15:24:47,429 DEBUG [StoreCloser-TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/235d6ee79118430ebc1d437d12869347 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/235d6ee79118430ebc1d437d12869347 2024-11-23T15:24:47,430 DEBUG [StoreCloser-TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/bdcd6af87a774e3894b6d14b02cd1069 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/bdcd6af87a774e3894b6d14b02cd1069 2024-11-23T15:24:47,431 DEBUG [StoreCloser-TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/3dac953d3143491a963460ea4499c8aa to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/3dac953d3143491a963460ea4499c8aa 2024-11-23T15:24:47,432 DEBUG [StoreCloser-TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/e398089425714411a5446fc7b8ba951b to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/e398089425714411a5446fc7b8ba951b 2024-11-23T15:24:47,433 DEBUG [StoreCloser-TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/f44783d19a104490b1a9a0a4a1c56624 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/f44783d19a104490b1a9a0a4a1c56624 2024-11-23T15:24:47,434 DEBUG [StoreCloser-TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/7209624e503e44279fb5fef3198d40ea to 
hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/7209624e503e44279fb5fef3198d40ea 2024-11-23T15:24:47,435 DEBUG [StoreCloser-TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/22e456cb42484fbea5f9a7082fc4171c to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/22e456cb42484fbea5f9a7082fc4171c 2024-11-23T15:24:47,437 DEBUG [StoreCloser-TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/f1a99b6c1a274279ab49d3b63c4435a8 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/f1a99b6c1a274279ab49d3b63c4435a8 2024-11-23T15:24:47,438 DEBUG [StoreCloser-TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/28d70fcecbfc48d2b32d856377bab704 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/28d70fcecbfc48d2b32d856377bab704 2024-11-23T15:24:47,439 DEBUG [StoreCloser-TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/efe2c3ee5afc4ee2b989e9e824a127af to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/efe2c3ee5afc4ee2b989e9e824a127af 2024-11-23T15:24:47,440 DEBUG [StoreCloser-TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/b130dd77da2b4c9981cc33ff82d617dc to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/b130dd77da2b4c9981cc33ff82d617dc 2024-11-23T15:24:47,441 DEBUG [StoreCloser-TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/746faef8e3864cc1880465bb012a61d3 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/746faef8e3864cc1880465bb012a61d3 2024-11-23T15:24:47,442 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/825c008af7594132a39c31e2d4354f93 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/825c008af7594132a39c31e2d4354f93 2024-11-23T15:24:47,443 DEBUG [StoreCloser-TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/c5df623b9d1646afb89cad0b7019e15f to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/c5df623b9d1646afb89cad0b7019e15f 2024-11-23T15:24:47,444 DEBUG [StoreCloser-TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/db56faea3b8244208739d33d644422b4 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/db56faea3b8244208739d33d644422b4 2024-11-23T15:24:47,445 DEBUG [StoreCloser-TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/daa8143dac9d4ac8aed18cd93f559b27 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/daa8143dac9d4ac8aed18cd93f559b27 2024-11-23T15:24:47,447 DEBUG [StoreCloser-TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/6d76a9c690b7492e9601f7a430c6f155 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/6d76a9c690b7492e9601f7a430c6f155 2024-11-23T15:24:47,448 DEBUG [StoreCloser-TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/c4884e2e6fa442aeb9069551a5f4722b to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/c4884e2e6fa442aeb9069551a5f4722b 2024-11-23T15:24:47,449 DEBUG [StoreCloser-TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/d688b5d3816547c5a1a5974523a4bb83 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/d688b5d3816547c5a1a5974523a4bb83 2024-11-23T15:24:47,450 DEBUG [StoreCloser-TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/852ae807d2574119afffc328d3e436a4 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/852ae807d2574119afffc328d3e436a4 2024-11-23T15:24:47,451 DEBUG [StoreCloser-TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/9b41f9b3bc614ebd8cfce73664a65e08 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/9b41f9b3bc614ebd8cfce73664a65e08 2024-11-23T15:24:47,453 DEBUG [StoreCloser-TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/31e307a7b7a347cdb0fb130b2440d8f4, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/09f513b327d84b02aa9a31d683d540c4, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/bf8a3b7f443c43debfd21881d760379f, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/6b53822c33124fe58e937df549f25ce5, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/bd6be0518dd448efab03260c4d0f92bd, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/3ed30acab37b41cf9ba19b896bfdb1f1, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/5611991eb11746ed9d8fcff6bbf0f134, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/53311e6fe2144d03b678581a8d617ed7, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/077fb6a3345747249969d4a4cbe13ecd, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/0581a228a8844d548d43d6fb97c66410, 
hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/4e1c27dbfc9e4f2d91e6ffd7fa7a66b1, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/feac5bd9c6dc439dba5b85e8aba2d3ac, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/8e7ea57107b94c2bad3861306eb906e6, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/6d88af0660934234acf464d8ef56bd2b, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/35e179b2a7ce496d9eaf33f5fef4b704, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/17bbb7237a6b4ae68d1d210665708eb3, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/7df6696c55dd4805b2b611302e09d949, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/4bdc19660d2f4f4a8cef5aa27e6a0509, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/57009fe9f4dd468ab2a5a77450d9d34d, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/4533e4c1829b45c996319d7c13a2d35e, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/0ad7203d568c47f79c5ac10e5b13f869, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/62ceb8703821403f9d822ee2a51f6181, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/45d439019d4d48a4a3bf848e7fd66ffa, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/00230f55e52e417d817b90b6550b6715, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/200945a1a917429b9d3bcf4f6c45add1, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/de0e3f535bae489884faf406d2b6a2d7, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/e9fb149977634b15ad2f1fa5f21d2cd4, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/6cd707d0a1ac4bb4be6b7fbbe0406f45, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/90e070b5a72c427087b7bd3b5c92c289, 
hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/e7d9a5ffde2f4599aec715024eb94b10, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/77dce7680f104a7ea031329ef42bb2ce, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/d346430b2ba5408589969cc7458224a9, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/7bf4d2d33b1b4bb692f551b26ccaead6] to archive 2024-11-23T15:24:47,454 DEBUG [StoreCloser-TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-23T15:24:47,456 DEBUG [StoreCloser-TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/31e307a7b7a347cdb0fb130b2440d8f4 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/31e307a7b7a347cdb0fb130b2440d8f4 2024-11-23T15:24:47,457 DEBUG [StoreCloser-TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/09f513b327d84b02aa9a31d683d540c4 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/09f513b327d84b02aa9a31d683d540c4 2024-11-23T15:24:47,458 DEBUG [StoreCloser-TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/bf8a3b7f443c43debfd21881d760379f to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/bf8a3b7f443c43debfd21881d760379f 2024-11-23T15:24:47,459 DEBUG [StoreCloser-TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/6b53822c33124fe58e937df549f25ce5 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/6b53822c33124fe58e937df549f25ce5 2024-11-23T15:24:47,460 DEBUG [StoreCloser-TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/bd6be0518dd448efab03260c4d0f92bd to 
hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/bd6be0518dd448efab03260c4d0f92bd 2024-11-23T15:24:47,461 DEBUG [StoreCloser-TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/3ed30acab37b41cf9ba19b896bfdb1f1 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/3ed30acab37b41cf9ba19b896bfdb1f1 2024-11-23T15:24:47,462 DEBUG [StoreCloser-TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/5611991eb11746ed9d8fcff6bbf0f134 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/5611991eb11746ed9d8fcff6bbf0f134 2024-11-23T15:24:47,464 DEBUG [StoreCloser-TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/53311e6fe2144d03b678581a8d617ed7 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/53311e6fe2144d03b678581a8d617ed7 2024-11-23T15:24:47,465 DEBUG [StoreCloser-TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/077fb6a3345747249969d4a4cbe13ecd to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/077fb6a3345747249969d4a4cbe13ecd 2024-11-23T15:24:47,466 DEBUG [StoreCloser-TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/0581a228a8844d548d43d6fb97c66410 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/0581a228a8844d548d43d6fb97c66410 2024-11-23T15:24:47,467 DEBUG [StoreCloser-TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/4e1c27dbfc9e4f2d91e6ffd7fa7a66b1 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/4e1c27dbfc9e4f2d91e6ffd7fa7a66b1 2024-11-23T15:24:47,468 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/feac5bd9c6dc439dba5b85e8aba2d3ac to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/feac5bd9c6dc439dba5b85e8aba2d3ac 2024-11-23T15:24:47,469 DEBUG [StoreCloser-TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/8e7ea57107b94c2bad3861306eb906e6 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/8e7ea57107b94c2bad3861306eb906e6 2024-11-23T15:24:47,470 DEBUG [StoreCloser-TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/6d88af0660934234acf464d8ef56bd2b to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/6d88af0660934234acf464d8ef56bd2b 2024-11-23T15:24:47,471 DEBUG [StoreCloser-TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/35e179b2a7ce496d9eaf33f5fef4b704 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/35e179b2a7ce496d9eaf33f5fef4b704 2024-11-23T15:24:47,473 DEBUG [StoreCloser-TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/17bbb7237a6b4ae68d1d210665708eb3 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/17bbb7237a6b4ae68d1d210665708eb3 2024-11-23T15:24:47,474 DEBUG [StoreCloser-TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/7df6696c55dd4805b2b611302e09d949 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/7df6696c55dd4805b2b611302e09d949 2024-11-23T15:24:47,475 DEBUG [StoreCloser-TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/4bdc19660d2f4f4a8cef5aa27e6a0509 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/4bdc19660d2f4f4a8cef5aa27e6a0509 2024-11-23T15:24:47,476 DEBUG [StoreCloser-TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/57009fe9f4dd468ab2a5a77450d9d34d to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/57009fe9f4dd468ab2a5a77450d9d34d 2024-11-23T15:24:47,477 DEBUG [StoreCloser-TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/4533e4c1829b45c996319d7c13a2d35e to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/4533e4c1829b45c996319d7c13a2d35e 2024-11-23T15:24:47,478 DEBUG [StoreCloser-TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/0ad7203d568c47f79c5ac10e5b13f869 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/0ad7203d568c47f79c5ac10e5b13f869 2024-11-23T15:24:47,480 DEBUG [StoreCloser-TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/62ceb8703821403f9d822ee2a51f6181 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/62ceb8703821403f9d822ee2a51f6181 2024-11-23T15:24:47,481 DEBUG [StoreCloser-TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/45d439019d4d48a4a3bf848e7fd66ffa to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/45d439019d4d48a4a3bf848e7fd66ffa 2024-11-23T15:24:47,482 DEBUG [StoreCloser-TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/00230f55e52e417d817b90b6550b6715 to 
hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/00230f55e52e417d817b90b6550b6715 2024-11-23T15:24:47,483 DEBUG [StoreCloser-TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/200945a1a917429b9d3bcf4f6c45add1 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/200945a1a917429b9d3bcf4f6c45add1 2024-11-23T15:24:47,484 DEBUG [StoreCloser-TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/de0e3f535bae489884faf406d2b6a2d7 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/de0e3f535bae489884faf406d2b6a2d7 2024-11-23T15:24:47,485 DEBUG [StoreCloser-TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/e9fb149977634b15ad2f1fa5f21d2cd4 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/e9fb149977634b15ad2f1fa5f21d2cd4 2024-11-23T15:24:47,486 DEBUG [StoreCloser-TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/6cd707d0a1ac4bb4be6b7fbbe0406f45 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/6cd707d0a1ac4bb4be6b7fbbe0406f45 2024-11-23T15:24:47,487 DEBUG [StoreCloser-TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/90e070b5a72c427087b7bd3b5c92c289 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/90e070b5a72c427087b7bd3b5c92c289 2024-11-23T15:24:47,488 DEBUG [StoreCloser-TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/e7d9a5ffde2f4599aec715024eb94b10 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/e7d9a5ffde2f4599aec715024eb94b10 2024-11-23T15:24:47,489 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/77dce7680f104a7ea031329ef42bb2ce to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/77dce7680f104a7ea031329ef42bb2ce 2024-11-23T15:24:47,491 DEBUG [StoreCloser-TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/d346430b2ba5408589969cc7458224a9 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/d346430b2ba5408589969cc7458224a9 2024-11-23T15:24:47,492 DEBUG [StoreCloser-TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/7bf4d2d33b1b4bb692f551b26ccaead6 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/7bf4d2d33b1b4bb692f551b26ccaead6 2024-11-23T15:24:47,496 DEBUG [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/recovered.edits/507.seqid, newMaxSeqId=507, maxSeqId=1 2024-11-23T15:24:47,499 INFO [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d. 
2024-11-23T15:24:47,499 DEBUG [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.HRegion(1635): Region close journal for d6bd711ee7b1117306956b276de6b58d: 2024-11-23T15:24:47,501 INFO [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] handler.UnassignRegionHandler(170): Closed d6bd711ee7b1117306956b276de6b58d 2024-11-23T15:24:47,501 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=36 updating hbase:meta row=d6bd711ee7b1117306956b276de6b58d, regionState=CLOSED 2024-11-23T15:24:47,504 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=37, resume processing ppid=36 2024-11-23T15:24:47,504 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=37, ppid=36, state=SUCCESS; CloseRegionProcedure d6bd711ee7b1117306956b276de6b58d, server=6a36843bf905,33811,1732375456985 in 1.5660 sec 2024-11-23T15:24:47,505 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=36, resume processing ppid=35 2024-11-23T15:24:47,505 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=36, ppid=35, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=d6bd711ee7b1117306956b276de6b58d, UNASSIGN in 1.5700 sec 2024-11-23T15:24:47,507 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=35, resume processing ppid=34 2024-11-23T15:24:47,507 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=35, ppid=34, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.5760 sec 2024-11-23T15:24:47,508 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732375487508"}]},"ts":"1732375487508"} 2024-11-23T15:24:47,509 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-11-23T15:24:47,515 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-11-23T15:24:47,516 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=34, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.5950 sec 2024-11-23T15:24:48,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=34 2024-11-23T15:24:48,030 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 34 completed 2024-11-23T15:24:48,033 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-11-23T15:24:48,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] procedure2.ProcedureExecutor(1098): Stored pid=38, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-11-23T15:24:48,038 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=38, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-23T15:24:48,040 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=38, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-23T15:24:48,040 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=38 2024-11-23T15:24:48,043 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d 2024-11-23T15:24:48,047 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A, FileablePath, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B, FileablePath, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C, FileablePath, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/recovered.edits] 2024-11-23T15:24:48,050 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/30208c6f6c174bd79eefcde08a7b9447 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/30208c6f6c174bd79eefcde08a7b9447 2024-11-23T15:24:48,052 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/b4f658f77caf44a3af209f53d92c1b2a to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/b4f658f77caf44a3af209f53d92c1b2a 2024-11-23T15:24:48,053 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/c9b8f1ec3b844b13a170a3cf43fff114 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/A/c9b8f1ec3b844b13a170a3cf43fff114 2024-11-23T15:24:48,056 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/0d4e6463017f42b68b53c44b76bf88dd to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/0d4e6463017f42b68b53c44b76bf88dd 2024-11-23T15:24:48,058 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/860d44b0d25f4a01bf218fe20ae0eba6 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/860d44b0d25f4a01bf218fe20ae0eba6 
2024-11-23T15:24:48,060 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/a94468d33ca7485bb369f15ab63f77d8 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/B/a94468d33ca7485bb369f15ab63f77d8 2024-11-23T15:24:48,063 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/248333e177f042a3a5f65b3107d6b3ea to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/248333e177f042a3a5f65b3107d6b3ea 2024-11-23T15:24:48,065 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/3e1832b29d654824993838b668f28e9c to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/3e1832b29d654824993838b668f28e9c 2024-11-23T15:24:48,066 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/b850829858f2413381cddaaf53731dde to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/C/b850829858f2413381cddaaf53731dde 2024-11-23T15:24:48,070 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/recovered.edits/507.seqid to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d/recovered.edits/507.seqid 2024-11-23T15:24:48,070 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d6bd711ee7b1117306956b276de6b58d 2024-11-23T15:24:48,070 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-11-23T15:24:48,076 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=38, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-23T15:24:48,081 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33811 {}] util.ReflectedFunctionCache(97): Populated cache for org.apache.hadoop.hbase.filter.KeyOnlyFilter in 0ms 2024-11-23T15:24:48,084 WARN [PEWorker-4 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-11-23T15:24:48,118 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 
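The pid=34/pid=38 pair driving this sequence (DisableTableProcedure, then DeleteTableProcedure removing the regions, META rows and descriptor) is what a plain client-side disable-and-drop produces. A minimal sketch of the equivalent Admin calls, assuming a reachable cluster configuration; this is not the test harness itself:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DropTableSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();           // picks up hbase-site.xml
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                TableName tn = TableName.valueOf("TestAcidGuarantees");
                if (admin.tableExists(tn)) {
                    admin.disableTable(tn);   // drives a DisableTableProcedure (pid=34 above)
                    admin.deleteTable(tn);    // drives a DeleteTableProcedure (pid=38 above)
                }
            }
        }
    }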
2024-11-23T15:24:48,120 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=38, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-23T15:24:48,120 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 2024-11-23T15:24:48,120 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732375488120"}]},"ts":"9223372036854775807"} 2024-11-23T15:24:48,123 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-11-23T15:24:48,123 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => d6bd711ee7b1117306956b276de6b58d, NAME => 'TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d.', STARTKEY => '', ENDKEY => ''}] 2024-11-23T15:24:48,123 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 2024-11-23T15:24:48,123 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732375488123"}]},"ts":"9223372036854775807"} 2024-11-23T15:24:48,126 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-11-23T15:24:48,128 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(133): Finished pid=38, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-23T15:24:48,129 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=38, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 94 msec 2024-11-23T15:24:48,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=38 2024-11-23T15:24:48,141 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 38 completed 2024-11-23T15:24:48,158 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testMixedAtomicity Thread=239 (was 219) Potentially hanging thread: hconnection-0x10bb86e4-shared-pool-6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1477673902_22 at /127.0.0.1:48112 [Waiting for operation #351] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x10bb86e4-shared-pool-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-4 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-235677810_22 at /127.0.0.1:51616 [Waiting for operation #336] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) 
app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-9 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS-EventLoopGroup-1-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RSProcedureDispatcher-pool-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-8 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_OPEN_REGION-regionserver/6a36843bf905:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x10bb86e4-shared-pool-7 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-6 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-10 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'HBase' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: RS:0;6a36843bf905:33811-shortCompactions-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.PriorityBlockingQueue.take(PriorityBlockingQueue.java:535) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-12 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x10bb86e4-shared-pool-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-7 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-5 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-11 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-13 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=455 (was 444) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=287 (was 253) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=3950 (was 4487) 2024-11-23T15:24:48,168 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testMobMixedAtomicity Thread=239, OpenFileDescriptor=455, MaxFileDescriptor=1048576, SystemLoadAverage=287, ProcessCount=11, AvailableMemoryMB=3949 2024-11-23T15:24:48,170 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
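The TableDescriptorChecker warning fires because the test's table descriptor carries a 131072-byte (128 KB) memstore flush size, presumably chosen to force very frequent flushes while the atomicity checks run. A sketch of how such a per-table flush size is expressed with the standard HBase 2.x descriptor builder (class name is illustrative; the value is taken from the warning):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class FlushSizeSketch {
        // A descriptor carrying the 131072-byte flush size the checker warns about.
        static TableDescriptor smallFlushDescriptor() {
            return TableDescriptorBuilder
                .newBuilder(TableName.valueOf("TestAcidGuarantees"))
                .setMemStoreFlushSize(131072L)   // 128 KB, far below the 128 MB default, hence the WARN
                .build();
        }
    }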
2024-11-23T15:24:48,171 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-23T15:24:48,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] procedure2.ProcedureExecutor(1098): Stored pid=39, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-11-23T15:24:48,173 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=39, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-11-23T15:24:48,173 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:48,173 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 39 2024-11-23T15:24:48,174 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=39, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-23T15:24:48,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=39 2024-11-23T15:24:48,181 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073741947_1123 (size=963) 2024-11-23T15:24:48,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=39 2024-11-23T15:24:48,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=39 2024-11-23T15:24:48,584 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704 2024-11-23T15:24:48,590 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073741948_1124 (size=53) 2024-11-23T15:24:48,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=39 2024-11-23T15:24:48,991 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T15:24:48,991 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing f0440d31fa3d850bbc6f1938601d069f, disabling compactions & flushes 2024-11-23T15:24:48,991 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. 2024-11-23T15:24:48,991 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. 2024-11-23T15:24:48,991 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. after waiting 0 ms 2024-11-23T15:24:48,991 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. 2024-11-23T15:24:48,991 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. 
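The CreateTableProcedure above (pid=39) builds TestAcidGuarantees with families A, B and C (VERSIONS => '1', BLOCKSIZE => 65536) and the table attribute 'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'. A minimal client-side sketch that would produce an equivalent descriptor, assuming an Admin handle from an already-open Connection (not the test's own setup code):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateTableSketch {
        static void createTestTable(Admin admin) throws java.io.IOException {
            TableDescriptorBuilder tdb = TableDescriptorBuilder
                .newBuilder(TableName.valueOf("TestAcidGuarantees"))
                // table-level attribute seen in the descriptor logged above
                .setValue("hbase.hregion.compacting.memstore.type", "ADAPTIVE");
            for (String family : new String[] {"A", "B", "C"}) {
                tdb.setColumnFamily(ColumnFamilyDescriptorBuilder
                    .newBuilder(Bytes.toBytes(family))
                    .setMaxVersions(1)        // VERSIONS => '1'
                    .setBlocksize(65536)      // BLOCKSIZE => '65536'
                    .build());
            }
            admin.createTable(tdb.build());   // drives a CreateTableProcedure (pid=39 above)
        }
    }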
2024-11-23T15:24:48,991 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for f0440d31fa3d850bbc6f1938601d069f: 2024-11-23T15:24:48,992 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=39, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-11-23T15:24:48,992 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1732375488992"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732375488992"}]},"ts":"1732375488992"} 2024-11-23T15:24:48,994 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-11-23T15:24:48,995 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=39, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-23T15:24:48,995 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732375488995"}]},"ts":"1732375488995"} 2024-11-23T15:24:48,996 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-11-23T15:24:49,000 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=40, ppid=39, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=f0440d31fa3d850bbc6f1938601d069f, ASSIGN}] 2024-11-23T15:24:49,001 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=40, ppid=39, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=f0440d31fa3d850bbc6f1938601d069f, ASSIGN 2024-11-23T15:24:49,002 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=40, ppid=39, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=f0440d31fa3d850bbc6f1938601d069f, ASSIGN; state=OFFLINE, location=6a36843bf905,33811,1732375456985; forceNewPlan=false, retain=false 2024-11-23T15:24:49,152 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=40 updating hbase:meta row=f0440d31fa3d850bbc6f1938601d069f, regionState=OPENING, regionLocation=6a36843bf905,33811,1732375456985 2024-11-23T15:24:49,154 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=41, ppid=40, state=RUNNABLE; OpenRegionProcedure f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985}] 2024-11-23T15:24:49,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=39 2024-11-23T15:24:49,305 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:24:49,309 INFO [RS_OPEN_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_OPEN_REGION, pid=41}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. 
2024-11-23T15:24:49,309 DEBUG [RS_OPEN_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_OPEN_REGION, pid=41}] regionserver.HRegion(7285): Opening region: {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} 2024-11-23T15:24:49,309 DEBUG [RS_OPEN_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_OPEN_REGION, pid=41}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees f0440d31fa3d850bbc6f1938601d069f 2024-11-23T15:24:49,309 DEBUG [RS_OPEN_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_OPEN_REGION, pid=41}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T15:24:49,310 DEBUG [RS_OPEN_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_OPEN_REGION, pid=41}] regionserver.HRegion(7327): checking encryption for f0440d31fa3d850bbc6f1938601d069f 2024-11-23T15:24:49,310 DEBUG [RS_OPEN_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_OPEN_REGION, pid=41}] regionserver.HRegion(7330): checking classloading for f0440d31fa3d850bbc6f1938601d069f 2024-11-23T15:24:49,311 INFO [StoreOpener-f0440d31fa3d850bbc6f1938601d069f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region f0440d31fa3d850bbc6f1938601d069f 2024-11-23T15:24:49,312 INFO [StoreOpener-f0440d31fa3d850bbc6f1938601d069f-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-23T15:24:49,312 INFO [StoreOpener-f0440d31fa3d850bbc6f1938601d069f-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region f0440d31fa3d850bbc6f1938601d069f columnFamilyName A 2024-11-23T15:24:49,313 DEBUG [StoreOpener-f0440d31fa3d850bbc6f1938601d069f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:49,313 INFO [StoreOpener-f0440d31fa3d850bbc6f1938601d069f-1 {}] regionserver.HStore(327): Store=f0440d31fa3d850bbc6f1938601d069f/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T15:24:49,313 INFO [StoreOpener-f0440d31fa3d850bbc6f1938601d069f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region f0440d31fa3d850bbc6f1938601d069f 2024-11-23T15:24:49,314 INFO [StoreOpener-f0440d31fa3d850bbc6f1938601d069f-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-23T15:24:49,315 INFO [StoreOpener-f0440d31fa3d850bbc6f1938601d069f-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region f0440d31fa3d850bbc6f1938601d069f columnFamilyName B 2024-11-23T15:24:49,315 DEBUG [StoreOpener-f0440d31fa3d850bbc6f1938601d069f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:49,315 INFO [StoreOpener-f0440d31fa3d850bbc6f1938601d069f-1 {}] regionserver.HStore(327): Store=f0440d31fa3d850bbc6f1938601d069f/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T15:24:49,315 INFO [StoreOpener-f0440d31fa3d850bbc6f1938601d069f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region f0440d31fa3d850bbc6f1938601d069f 2024-11-23T15:24:49,316 INFO [StoreOpener-f0440d31fa3d850bbc6f1938601d069f-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-23T15:24:49,316 INFO [StoreOpener-f0440d31fa3d850bbc6f1938601d069f-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region f0440d31fa3d850bbc6f1938601d069f columnFamilyName C 2024-11-23T15:24:49,316 DEBUG [StoreOpener-f0440d31fa3d850bbc6f1938601d069f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:49,317 INFO [StoreOpener-f0440d31fa3d850bbc6f1938601d069f-1 {}] regionserver.HStore(327): Store=f0440d31fa3d850bbc6f1938601d069f/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T15:24:49,317 INFO [RS_OPEN_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_OPEN_REGION, pid=41}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. 2024-11-23T15:24:49,318 DEBUG [RS_OPEN_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_OPEN_REGION, pid=41}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f 2024-11-23T15:24:49,318 DEBUG [RS_OPEN_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_OPEN_REGION, pid=41}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f 2024-11-23T15:24:49,319 DEBUG [RS_OPEN_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_OPEN_REGION, pid=41}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-23T15:24:49,321 DEBUG [RS_OPEN_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_OPEN_REGION, pid=41}] regionserver.HRegion(1085): writing seq id for f0440d31fa3d850bbc6f1938601d069f 2024-11-23T15:24:49,323 DEBUG [RS_OPEN_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_OPEN_REGION, pid=41}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-23T15:24:49,323 INFO [RS_OPEN_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_OPEN_REGION, pid=41}] regionserver.HRegion(1102): Opened f0440d31fa3d850bbc6f1938601d069f; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=75278067, jitterRate=0.12173061072826385}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-23T15:24:49,324 DEBUG [RS_OPEN_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_OPEN_REGION, pid=41}] regionserver.HRegion(1001): Region open journal for f0440d31fa3d850bbc6f1938601d069f: 2024-11-23T15:24:49,325 INFO [RS_OPEN_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_OPEN_REGION, pid=41}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f., pid=41, masterSystemTime=1732375489305 2024-11-23T15:24:49,326 DEBUG [RS_OPEN_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_OPEN_REGION, pid=41}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. 2024-11-23T15:24:49,326 INFO [RS_OPEN_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_OPEN_REGION, pid=41}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. 
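The StoreOpener lines show each family of f0440d31fa3d850bbc6f1938601d069f coming up with a CompactingMemStore whose in-memory compaction policy is ADAPTIVE, the value carried by the table attribute above. The same policy can also be requested per column family through the descriptor builder; a small sketch using the standard HBase 2.x client API, shown only to make the "compactor=ADAPTIVE" store-open lines concrete:

    import org.apache.hadoop.hbase.MemoryCompactionPolicy;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class InMemoryCompactionSketch {
        // Family 'A' with ADAPTIVE in-memory compaction, matching "compactor=ADAPTIVE" above.
        static ColumnFamilyDescriptor adaptiveFamily() {
            return ColumnFamilyDescriptorBuilder
                .newBuilder(Bytes.toBytes("A"))
                .setInMemoryCompaction(MemoryCompactionPolicy.ADAPTIVE)
                .build();
        }
    }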
2024-11-23T15:24:49,326 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=40 updating hbase:meta row=f0440d31fa3d850bbc6f1938601d069f, regionState=OPEN, openSeqNum=2, regionLocation=6a36843bf905,33811,1732375456985 2024-11-23T15:24:49,329 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=41, resume processing ppid=40 2024-11-23T15:24:49,329 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=41, ppid=40, state=SUCCESS; OpenRegionProcedure f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 in 174 msec 2024-11-23T15:24:49,330 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=40, resume processing ppid=39 2024-11-23T15:24:49,330 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=40, ppid=39, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=f0440d31fa3d850bbc6f1938601d069f, ASSIGN in 329 msec 2024-11-23T15:24:49,331 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=39, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-23T15:24:49,331 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732375489331"}]},"ts":"1732375489331"} 2024-11-23T15:24:49,332 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-11-23T15:24:49,335 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=39, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-11-23T15:24:49,336 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=39, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.1640 sec 2024-11-23T15:24:50,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=39 2024-11-23T15:24:50,280 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 39 completed 2024-11-23T15:24:50,281 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7a9b9802 to 127.0.0.1:62881 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@118b007e 2024-11-23T15:24:50,285 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5d29de25, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T15:24:50,287 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T15:24:50,289 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33968, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T15:24:50,290 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-23T15:24:50,292 INFO [RS-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44688, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-23T15:24:50,297 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-11-23T15:24:50,298 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster$14(2798): Client=jenkins//172.17.0.2 modify table TestAcidGuarantees from 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} to 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '4', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-23T15:24:50,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] procedure2.ProcedureExecutor(1098): Stored pid=42, state=RUNNABLE:MODIFY_TABLE_PREPARE; ModifyTableProcedure table=TestAcidGuarantees 2024-11-23T15:24:50,318 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073741949_1125 (size=999) 2024-11-23T15:24:50,319 DEBUG [PEWorker-3 {}] util.FSTableDescriptors(519): Deleted hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000001.963 2024-11-23T15:24:50,320 INFO [PEWorker-3 {}] util.FSTableDescriptors(297): Updated 
tableinfo=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000002.999 2024-11-23T15:24:50,324 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=43, ppid=42, state=RUNNABLE:REOPEN_TABLE_REGIONS_GET_REGIONS; ReopenTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-23T15:24:50,334 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=44, ppid=43, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=f0440d31fa3d850bbc6f1938601d069f, REOPEN/MOVE}] 2024-11-23T15:24:50,335 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=44, ppid=43, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=f0440d31fa3d850bbc6f1938601d069f, REOPEN/MOVE 2024-11-23T15:24:50,336 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=44 updating hbase:meta row=f0440d31fa3d850bbc6f1938601d069f, regionState=CLOSING, regionLocation=6a36843bf905,33811,1732375456985 2024-11-23T15:24:50,337 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-23T15:24:50,337 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=45, ppid=44, state=RUNNABLE; CloseRegionProcedure f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985}] 2024-11-23T15:24:50,488 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:24:50,489 INFO [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=45}] handler.UnassignRegionHandler(124): Close f0440d31fa3d850bbc6f1938601d069f 2024-11-23T15:24:50,489 DEBUG [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=45}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-23T15:24:50,489 DEBUG [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=45}] regionserver.HRegion(1681): Closing f0440d31fa3d850bbc6f1938601d069f, disabling compactions & flushes 2024-11-23T15:24:50,489 INFO [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=45}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. 2024-11-23T15:24:50,490 DEBUG [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=45}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. 2024-11-23T15:24:50,490 DEBUG [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=45}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. after waiting 0 ms 2024-11-23T15:24:50,490 DEBUG [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=45}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. 
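Editor's note: the modify request logged above turns column family A into a MOB family (IS_MOB => 'true', MOB_THRESHOLD => '4') and also trips the TableDescriptorChecker warning that MEMSTORE_FLUSHSIZE (131072) is very small. A hedged sketch of issuing an equivalent change through the Admin API, reusing the table and family names from the log:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class EnableMobOnFamilyA {
      public static void main(String[] args) throws Exception {
        TableName table = TableName.valueOf("TestAcidGuarantees");
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          TableDescriptor current = admin.getDescriptor(table);
          TableDescriptor modified = TableDescriptorBuilder.newBuilder(current)
              // Rewrite family A as a MOB family with the 4-byte threshold seen in the log.
              .modifyColumnFamily(ColumnFamilyDescriptorBuilder
                  .newBuilder(current.getColumnFamily(Bytes.toBytes("A")))
                  .setMobEnabled(true)
                  .setMobThreshold(4L)
                  .build())
              .build();
          // As the subsequent ReopenTableRegionsProcedure shows, this reopens the
          // table's regions so the new descriptor takes effect.
          admin.modifyTable(modified);
        }
      }
    }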
2024-11-23T15:24:50,494 DEBUG [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=45}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/recovered.edits/4.seqid, newMaxSeqId=4, maxSeqId=1 2024-11-23T15:24:50,494 INFO [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=45}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. 2024-11-23T15:24:50,495 DEBUG [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=45}] regionserver.HRegion(1635): Region close journal for f0440d31fa3d850bbc6f1938601d069f: 2024-11-23T15:24:50,495 WARN [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=45}] regionserver.HRegionServer(3786): Not adding moved region record: f0440d31fa3d850bbc6f1938601d069f to self. 2024-11-23T15:24:50,496 INFO [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=45}] handler.UnassignRegionHandler(170): Closed f0440d31fa3d850bbc6f1938601d069f 2024-11-23T15:24:50,497 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=44 updating hbase:meta row=f0440d31fa3d850bbc6f1938601d069f, regionState=CLOSED 2024-11-23T15:24:50,500 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=45, resume processing ppid=44 2024-11-23T15:24:50,500 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=45, ppid=44, state=SUCCESS; CloseRegionProcedure f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 in 161 msec 2024-11-23T15:24:50,500 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=44, ppid=43, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=f0440d31fa3d850bbc6f1938601d069f, REOPEN/MOVE; state=CLOSED, location=6a36843bf905,33811,1732375456985; forceNewPlan=false, retain=true 2024-11-23T15:24:50,651 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=44 updating hbase:meta row=f0440d31fa3d850bbc6f1938601d069f, regionState=OPENING, regionLocation=6a36843bf905,33811,1732375456985 2024-11-23T15:24:50,652 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=46, ppid=44, state=RUNNABLE; OpenRegionProcedure f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985}] 2024-11-23T15:24:50,804 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:24:50,807 INFO [RS_OPEN_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_OPEN_REGION, pid=46}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. 
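Editor's note: the REOPEN/MOVE transition above closes the region (writing recovered.edits/4.seqid) before a new OpenRegionProcedure assigns it again, so a client that needs the modified descriptor to be live has to wait for the whole ModifyTableProcedure chain. A hedged sketch using the asynchronous Admin call; the timeout is arbitrary and not taken from the test:

    import java.util.concurrent.Future;
    import java.util.concurrent.TimeUnit;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.TableDescriptor;

    public final class ModifyAndWait {
      // Applies the descriptor and blocks until the master-side ModifyTableProcedure
      // (and the region close/reopen it schedules, like pid=42..46 above) has finished.
      static void modifyAndWait(Admin admin, TableDescriptor modified) throws Exception {
        Future<Void> done = admin.modifyTableAsync(modified);
        done.get(5, TimeUnit.MINUTES); // illustrative timeout only
      }
    }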
2024-11-23T15:24:50,807 DEBUG [RS_OPEN_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_OPEN_REGION, pid=46}] regionserver.HRegion(7285): Opening region: {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} 2024-11-23T15:24:50,808 DEBUG [RS_OPEN_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_OPEN_REGION, pid=46}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees f0440d31fa3d850bbc6f1938601d069f 2024-11-23T15:24:50,808 DEBUG [RS_OPEN_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_OPEN_REGION, pid=46}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T15:24:50,808 DEBUG [RS_OPEN_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_OPEN_REGION, pid=46}] regionserver.HRegion(7327): checking encryption for f0440d31fa3d850bbc6f1938601d069f 2024-11-23T15:24:50,808 DEBUG [RS_OPEN_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_OPEN_REGION, pid=46}] regionserver.HRegion(7330): checking classloading for f0440d31fa3d850bbc6f1938601d069f 2024-11-23T15:24:50,812 INFO [StoreOpener-f0440d31fa3d850bbc6f1938601d069f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region f0440d31fa3d850bbc6f1938601d069f 2024-11-23T15:24:50,813 INFO [StoreOpener-f0440d31fa3d850bbc6f1938601d069f-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-23T15:24:50,818 INFO [StoreOpener-f0440d31fa3d850bbc6f1938601d069f-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region f0440d31fa3d850bbc6f1938601d069f columnFamilyName A 2024-11-23T15:24:50,820 DEBUG [StoreOpener-f0440d31fa3d850bbc6f1938601d069f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:50,821 INFO [StoreOpener-f0440d31fa3d850bbc6f1938601d069f-1 {}] regionserver.HStore(327): Store=f0440d31fa3d850bbc6f1938601d069f/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T15:24:50,821 INFO [StoreOpener-f0440d31fa3d850bbc6f1938601d069f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region f0440d31fa3d850bbc6f1938601d069f 2024-11-23T15:24:50,822 INFO [StoreOpener-f0440d31fa3d850bbc6f1938601d069f-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-23T15:24:50,822 INFO [StoreOpener-f0440d31fa3d850bbc6f1938601d069f-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region f0440d31fa3d850bbc6f1938601d069f columnFamilyName B 2024-11-23T15:24:50,822 DEBUG [StoreOpener-f0440d31fa3d850bbc6f1938601d069f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:50,822 INFO [StoreOpener-f0440d31fa3d850bbc6f1938601d069f-1 {}] regionserver.HStore(327): Store=f0440d31fa3d850bbc6f1938601d069f/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T15:24:50,822 INFO [StoreOpener-f0440d31fa3d850bbc6f1938601d069f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region f0440d31fa3d850bbc6f1938601d069f 2024-11-23T15:24:50,823 INFO [StoreOpener-f0440d31fa3d850bbc6f1938601d069f-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-23T15:24:50,823 INFO [StoreOpener-f0440d31fa3d850bbc6f1938601d069f-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region f0440d31fa3d850bbc6f1938601d069f columnFamilyName C 2024-11-23T15:24:50,823 DEBUG [StoreOpener-f0440d31fa3d850bbc6f1938601d069f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:50,823 INFO [StoreOpener-f0440d31fa3d850bbc6f1938601d069f-1 {}] regionserver.HStore(327): Store=f0440d31fa3d850bbc6f1938601d069f/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T15:24:50,824 INFO [RS_OPEN_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_OPEN_REGION, pid=46}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. 2024-11-23T15:24:50,824 DEBUG [RS_OPEN_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_OPEN_REGION, pid=46}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f 2024-11-23T15:24:50,825 DEBUG [RS_OPEN_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_OPEN_REGION, pid=46}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f 2024-11-23T15:24:50,827 DEBUG [RS_OPEN_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_OPEN_REGION, pid=46}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-23T15:24:50,828 DEBUG [RS_OPEN_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_OPEN_REGION, pid=46}] regionserver.HRegion(1085): writing seq id for f0440d31fa3d850bbc6f1938601d069f 2024-11-23T15:24:50,829 INFO [RS_OPEN_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_OPEN_REGION, pid=46}] regionserver.HRegion(1102): Opened f0440d31fa3d850bbc6f1938601d069f; next sequenceid=5; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=74589280, jitterRate=0.11146688461303711}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-23T15:24:50,829 DEBUG [RS_OPEN_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_OPEN_REGION, pid=46}] regionserver.HRegion(1001): Region open journal for f0440d31fa3d850bbc6f1938601d069f: 2024-11-23T15:24:50,830 INFO [RS_OPEN_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_OPEN_REGION, pid=46}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f., pid=46, masterSystemTime=1732375490804 2024-11-23T15:24:50,832 DEBUG [RS_OPEN_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_OPEN_REGION, pid=46}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. 2024-11-23T15:24:50,832 INFO [RS_OPEN_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_OPEN_REGION, pid=46}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. 
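Editor's note: the reopened stores above come up as CompactingMemStore instances with the ADAPTIVE compactor and a 2.00 MB in-memory flush threshold, driven here by the table attribute 'hbase.hregion.compacting.memstore.type'. As a hedged sketch, the same policy can also be requested per column family rather than per table; the family name is reused from the log:

    import org.apache.hadoop.hbase.MemoryCompactionPolicy;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public final class AdaptiveMemStoreFamily {
      // Builds a family descriptor that asks for ADAPTIVE in-memory compaction,
      // the same policy the CompactingMemStore lines above report for stores A/B/C.
      static ColumnFamilyDescriptor adaptiveFamily(String name) {
        return ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(name))
            .setInMemoryCompaction(MemoryCompactionPolicy.ADAPTIVE)
            .build();
      }
    }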
2024-11-23T15:24:50,832 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=44 updating hbase:meta row=f0440d31fa3d850bbc6f1938601d069f, regionState=OPEN, openSeqNum=5, regionLocation=6a36843bf905,33811,1732375456985 2024-11-23T15:24:50,834 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=46, resume processing ppid=44 2024-11-23T15:24:50,834 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=46, ppid=44, state=SUCCESS; OpenRegionProcedure f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 in 181 msec 2024-11-23T15:24:50,836 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=44, resume processing ppid=43 2024-11-23T15:24:50,836 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=44, ppid=43, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=f0440d31fa3d850bbc6f1938601d069f, REOPEN/MOVE in 500 msec 2024-11-23T15:24:50,838 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=43, resume processing ppid=42 2024-11-23T15:24:50,839 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=43, ppid=42, state=SUCCESS; ReopenTableRegionsProcedure table=TestAcidGuarantees in 514 msec 2024-11-23T15:24:50,841 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=42, state=SUCCESS; ModifyTableProcedure table=TestAcidGuarantees in 540 msec 2024-11-23T15:24:50,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=42 2024-11-23T15:24:50,848 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7cae6c5c to 127.0.0.1:62881 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@79982672 2024-11-23T15:24:50,853 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@433e2b26, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T15:24:50,854 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5c820ef9 to 127.0.0.1:62881 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7b4bd1ba 2024-11-23T15:24:50,857 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@176c5c1b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T15:24:50,858 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0b44b1e5 to 127.0.0.1:62881 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@454f1431 2024-11-23T15:24:50,861 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@190853fc, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T15:24:50,862 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x42e904d8 to 
127.0.0.1:62881 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@505d5ccd 2024-11-23T15:24:50,865 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7c5c4716, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T15:24:50,866 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0a4c53ed to 127.0.0.1:62881 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@367f47f7 2024-11-23T15:24:50,869 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2885d2d9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T15:24:50,870 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x247c0c93 to 127.0.0.1:62881 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@22e911df 2024-11-23T15:24:50,873 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@78cafade, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T15:24:50,874 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x517ff977 to 127.0.0.1:62881 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3b727d6e 2024-11-23T15:24:50,876 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@14c16cd4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T15:24:50,877 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3448d233 to 127.0.0.1:62881 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1c7940d9 2024-11-23T15:24:50,880 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@341384e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T15:24:50,881 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7a11164b to 127.0.0.1:62881 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2c38ee58 2024-11-23T15:24:50,884 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@26b120d9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T15:24:50,890 INFO 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-23T15:24:50,890 DEBUG [hconnection-0x7ee0707e-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T15:24:50,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] procedure2.ProcedureExecutor(1098): Stored pid=47, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=47, table=TestAcidGuarantees 2024-11-23T15:24:50,891 DEBUG [hconnection-0x57263cc8-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T15:24:50,891 DEBUG [hconnection-0x665ea150-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T15:24:50,891 DEBUG [hconnection-0x319be96-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T15:24:50,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-11-23T15:24:50,892 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=47, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=47, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-23T15:24:50,892 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33976, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T15:24:50,892 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33980, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T15:24:50,892 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33986, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T15:24:50,893 DEBUG [hconnection-0x219c5acc-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T15:24:50,893 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=47, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=47, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-23T15:24:50,893 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=48, ppid=47, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-23T15:24:50,894 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33992, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T15:24:50,898 DEBUG [hconnection-0x3f1760b5-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T15:24:50,899 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33998, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T15:24:50,899 DEBUG [hconnection-0x2262414-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for 
service=ClientService, sasl=false 2024-11-23T15:24:50,900 DEBUG [hconnection-0x7b6d404a-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T15:24:50,900 DEBUG [hconnection-0x327b7041-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T15:24:50,901 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34006, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T15:24:50,901 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34008, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T15:24:50,901 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34036, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T15:24:50,901 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34024, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T15:24:50,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(8581): Flush requested on f0440d31fa3d850bbc6f1938601d069f 2024-11-23T15:24:50,911 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing f0440d31fa3d850bbc6f1938601d069f 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-23T15:24:50,912 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f0440d31fa3d850bbc6f1938601d069f, store=A 2024-11-23T15:24:50,912 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:24:50,912 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f0440d31fa3d850bbc6f1938601d069f, store=B 2024-11-23T15:24:50,912 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:24:50,912 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f0440d31fa3d850bbc6f1938601d069f, store=C 2024-11-23T15:24:50,912 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:24:50,971 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:50,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34008 deadline: 1732375550950, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:50,973 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241123680904e9c8084426b20d1e2294ba9cf2_f0440d31fa3d850bbc6f1938601d069f is 50, key is test_row_0/A:col10/1732375490906/Put/seqid=0 2024-11-23T15:24:50,978 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:50,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34024 deadline: 1732375550952, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:50,979 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:50,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33992 deadline: 1732375550962, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:50,987 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:50,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33998 deadline: 1732375550983, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:50,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-11-23T15:24:50,993 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073741950_1126 (size=12154) 2024-11-23T15:24:50,994 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:50,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33976 deadline: 1732375550984, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:51,045 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:24:51,046 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-11-23T15:24:51,046 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. 2024-11-23T15:24:51,046 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. as already flushing 2024-11-23T15:24:51,046 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. 2024-11-23T15:24:51,046 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
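Editor's note: while the MemStoreFlusher is already flushing, the remote FlushRegionCallable above fails with "NOT flushing ... as already flushing", and concurrent puts are rejected with RegionTooBusyException once the region hits its memstore blocking limit. The HBase client normally retries such calls itself; the explicit retry loop below is only a hedged sketch (retry count and backoff are illustrative, and depending on client settings the exception may arrive wrapped in a retries-exhausted error rather than directly):

    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;

    public final class BusyRegionRetry {
      // Retries a put with linear backoff when the region reports it is over its
      // memstore limit, as in the "Over memstore limit=512.0 K" warnings above.
      static void putWithRetry(Table table, Put put) throws Exception {
        int attempts = 5;                    // illustrative, not a tuned value
        for (int i = 1; i <= attempts; i++) {
          try {
            table.put(put);
            return;
          } catch (RegionTooBusyException busy) {
            if (i == attempts) {
              throw busy;
            }
            Thread.sleep(100L * i);          // back off while the flush drains the memstore
          }
        }
      }
    }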
2024-11-23T15:24:51,046 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:24:51,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:24:51,081 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:51,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34008 deadline: 1732375551073, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:51,082 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:51,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34024 deadline: 1732375551081, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:51,087 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:51,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33992 deadline: 1732375551085, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:51,100 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:51,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33998 deadline: 1732375551090, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:51,101 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:51,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33976 deadline: 1732375551096, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:51,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-11-23T15:24:51,199 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:24:51,200 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-11-23T15:24:51,200 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. 2024-11-23T15:24:51,200 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. as already flushing 2024-11-23T15:24:51,200 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. 2024-11-23T15:24:51,200 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
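Annotation (not part of the original log): the RegionTooBusyException rejections above come from HRegion.checkResources refusing writes once the region's memstore passes its blocking limit, reported here as 512.0 K. In a stock deployment that limit is derived from two configuration properties; the sketch below shows, under assumed values, how a test configuration could end up with a blocking limit of roughly 512 KB. The exact settings of this TestAcidGuarantees run are not visible in the log, so the numbers are illustrative only.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class SmallMemstoreConfig {
    // Illustrative values; the real settings used by this test run are not in the log.
    public static Configuration create() {
        Configuration conf = HBaseConfiguration.create();
        // Flush a region's memstore once it reaches 128 KB (the production default is 128 MB).
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
        // Writes are rejected with RegionTooBusyException once the memstore exceeds
        // flush.size * multiplier, i.e. 128 KB * 4 = 512 KB, matching the
        // "Over memstore limit=512.0 K" messages in the log.
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
        return conf;
    }
}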
2024-11-23T15:24:51,200 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:24:51,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:24:51,288 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:51,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34008 deadline: 1732375551283, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:51,289 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:51,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34024 deadline: 1732375551286, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:51,296 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:51,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33992 deadline: 1732375551290, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:51,309 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:51,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33976 deadline: 1732375551304, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:51,310 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:51,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33998 deadline: 1732375551305, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:51,353 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:24:51,353 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-11-23T15:24:51,354 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. 2024-11-23T15:24:51,354 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. as already flushing 2024-11-23T15:24:51,354 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. 2024-11-23T15:24:51,354 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
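Annotation (not part of the original log): the repeated "callId ... exception=RegionTooBusyException" entries show individual Mutate RPCs being turned away while the memstore is over its blocking limit. The HBase client normally retries these calls internally, but the sketch below makes the pattern explicit: a hypothetical writer that backs off and retries a Put when the exception surfaces. It is an illustration, not the test's actual writer code, and it assumes the exception reaches the caller directly rather than wrapped by the client's own retry layer.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BackoffWriter {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
            long backoffMs = 100;
            for (int attempt = 0; attempt < 10; attempt++) {
                try {
                    table.put(put);   // may be rejected while the region's flush is in progress
                    return;           // write accepted
                } catch (RegionTooBusyException e) {
                    // Region is over its memstore blocking limit; wait for the flush to drain it.
                    // (Simplification: in practice the client may rethrow this wrapped after
                    // its own retries.)
                    Thread.sleep(backoffMs);
                    backoffMs = Math.min(backoffMs * 2, 5_000);
                }
            }
            throw new java.io.IOException("gave up after repeated RegionTooBusyException");
        }
    }
}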
2024-11-23T15:24:51,354 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:24:51,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:24:51,395 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:51,400 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241123680904e9c8084426b20d1e2294ba9cf2_f0440d31fa3d850bbc6f1938601d069f to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123680904e9c8084426b20d1e2294ba9cf2_f0440d31fa3d850bbc6f1938601d069f 2024-11-23T15:24:51,403 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/A/12b8eef63a044b328496d4c655ef2418, store: [table=TestAcidGuarantees family=A region=f0440d31fa3d850bbc6f1938601d069f] 2024-11-23T15:24:51,411 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/A/12b8eef63a044b328496d4c655ef2418 is 175, key is test_row_0/A:col10/1732375490906/Put/seqid=0 2024-11-23T15:24:51,422 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073741951_1127 (size=30955) 2024-11-23T15:24:51,423 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=16, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/A/12b8eef63a044b328496d4c655ef2418 2024-11-23T15:24:51,476 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/B/5d215038b9f243ae80156d480a0f0a79 is 50, key is test_row_0/B:col10/1732375490906/Put/seqid=0 2024-11-23T15:24:51,491 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073741952_1128 (size=12001) 2024-11-23T15:24:51,492 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=16 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/B/5d215038b9f243ae80156d480a0f0a79 2024-11-23T15:24:51,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-11-23T15:24:51,508 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:24:51,508 DEBUG 
[RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-11-23T15:24:51,508 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. 2024-11-23T15:24:51,509 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. as already flushing 2024-11-23T15:24:51,509 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. 2024-11-23T15:24:51,509 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:24:51,509 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
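Annotation (not part of the original log): a few events back, store A is flushed through the MOB path (HMobStore renames the flushed file under .../mobdir/... and mob.DefaultMobStoreFlusher writes the store file), while store B goes through the plain DefaultStoreFlusher. That implies column family A is MOB-enabled on this table. Below is a minimal descriptor sketch for such a layout; the MOB threshold and the exact builder usage are assumptions for illustration, not taken from the test.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class MobTableDescriptor {
    public static TableDescriptor build() {
        // Family A stores large cells through the MOB path; the threshold is an assumed value.
        return TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("A"))
                .setMobEnabled(true)
                .setMobThreshold(100L)   // cells larger than 100 bytes go to MOB files (illustrative)
                .build())
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("B"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("C"))
            .build();
    }
}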
2024-11-23T15:24:51,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:24:51,523 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/C/0421148a5b2c4da2baf921cf3debb80f is 50, key is test_row_0/C:col10/1732375490906/Put/seqid=0 2024-11-23T15:24:51,550 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073741953_1129 (size=12001) 2024-11-23T15:24:51,592 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:51,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34024 deadline: 1732375551591, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:51,594 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:51,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34008 deadline: 1732375551591, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:51,599 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:51,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33992 deadline: 1732375551599, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:51,613 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:51,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33998 deadline: 1732375551612, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:51,613 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:51,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33976 deadline: 1732375551613, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:51,662 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:24:51,662 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-11-23T15:24:51,662 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. 
2024-11-23T15:24:51,662 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. as already flushing 2024-11-23T15:24:51,663 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. 2024-11-23T15:24:51,663 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:24:51,663 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T15:24:51,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:24:51,815 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:24:51,816 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-11-23T15:24:51,816 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. 2024-11-23T15:24:51,816 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. 
as already flushing 2024-11-23T15:24:51,816 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. 2024-11-23T15:24:51,816 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:24:51,817 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:24:51,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:24:51,951 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=16 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/C/0421148a5b2c4da2baf921cf3debb80f 2024-11-23T15:24:51,959 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/A/12b8eef63a044b328496d4c655ef2418 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/A/12b8eef63a044b328496d4c655ef2418 2024-11-23T15:24:51,966 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/A/12b8eef63a044b328496d4c655ef2418, entries=150, sequenceid=16, filesize=30.2 K 2024-11-23T15:24:51,968 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/B/5d215038b9f243ae80156d480a0f0a79 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/B/5d215038b9f243ae80156d480a0f0a79 2024-11-23T15:24:51,969 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:24:51,970 DEBUG 
[RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-11-23T15:24:51,970 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. 2024-11-23T15:24:51,970 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. as already flushing 2024-11-23T15:24:51,970 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. 2024-11-23T15:24:51,970 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:24:51,970 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T15:24:51,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
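The pid=48 loop above is a master-driven flush procedure: the region server rejects FlushRegionCallable with "Unable to complete flush" while the region is already flushing, and the master re-dispatches it until the in-flight flush completes. A minimal sketch of how such a table flush can be requested through the public Admin API is shown below; it is an illustration only, not the test harness's actual code, and the table name is simply copied from the log.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushRequestSketch {
    public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            // Request a flush of every region of the table. In this build the master
            // appears to coordinate it as remote flush procedures (the pid=48 entries
            // above); if a region is already flushing, the remote callable fails and
            // the master retries it, which is the loop visible in this log.
            admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}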
2024-11-23T15:24:51,975 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/B/5d215038b9f243ae80156d480a0f0a79, entries=150, sequenceid=16, filesize=11.7 K 2024-11-23T15:24:51,976 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/C/0421148a5b2c4da2baf921cf3debb80f as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/C/0421148a5b2c4da2baf921cf3debb80f 2024-11-23T15:24:51,983 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/C/0421148a5b2c4da2baf921cf3debb80f, entries=150, sequenceid=16, filesize=11.7 K 2024-11-23T15:24:51,985 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=154.31 KB/158010 for f0440d31fa3d850bbc6f1938601d069f in 1073ms, sequenceid=16, compaction requested=false 2024-11-23T15:24:51,985 DEBUG [MemStoreFlusher.0 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestAcidGuarantees' 2024-11-23T15:24:51,986 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for f0440d31fa3d850bbc6f1938601d069f: 2024-11-23T15:24:51,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-11-23T15:24:52,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(8581): Flush requested on f0440d31fa3d850bbc6f1938601d069f 2024-11-23T15:24:52,099 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing f0440d31fa3d850bbc6f1938601d069f 3/3 column families, dataSize=161.02 KB heapSize=422.63 KB 2024-11-23T15:24:52,101 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f0440d31fa3d850bbc6f1938601d069f, store=A 2024-11-23T15:24:52,101 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:24:52,101 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f0440d31fa3d850bbc6f1938601d069f, store=B 2024-11-23T15:24:52,101 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:24:52,101 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f0440d31fa3d850bbc6f1938601d069f, store=C 2024-11-23T15:24:52,101 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:24:52,111 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:52,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34024 deadline: 1732375552107, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:52,112 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:52,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34008 deadline: 1732375552108, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:52,113 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:52,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33992 deadline: 1732375552111, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:52,119 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:52,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33976 deadline: 1732375552117, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:52,120 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:52,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33998 deadline: 1732375552119, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:52,121 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241123cb9111ad30c142a58b7af081696d00f8_f0440d31fa3d850bbc6f1938601d069f is 50, key is test_row_0/A:col10/1732375490951/Put/seqid=0 2024-11-23T15:24:52,123 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:24:52,124 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-11-23T15:24:52,124 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. 2024-11-23T15:24:52,124 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. as already flushing 2024-11-23T15:24:52,124 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. 2024-11-23T15:24:52,124 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:24:52,124 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:24:52,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:24:52,142 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073741954_1130 (size=17034) 2024-11-23T15:24:52,142 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:52,148 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241123cb9111ad30c142a58b7af081696d00f8_f0440d31fa3d850bbc6f1938601d069f to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123cb9111ad30c142a58b7af081696d00f8_f0440d31fa3d850bbc6f1938601d069f 2024-11-23T15:24:52,151 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/A/ce3a025a03394b8f879fe2c36f1aa7d2, store: [table=TestAcidGuarantees family=A region=f0440d31fa3d850bbc6f1938601d069f] 2024-11-23T15:24:52,151 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/A/ce3a025a03394b8f879fe2c36f1aa7d2 is 175, key is test_row_0/A:col10/1732375490951/Put/seqid=0 2024-11-23T15:24:52,168 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073741955_1131 (size=48139) 2024-11-23T15:24:52,232 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:52,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34024 deadline: 1732375552213, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:52,233 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:52,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33992 deadline: 1732375552232, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:52,234 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:52,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34008 deadline: 1732375552232, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:52,277 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:24:52,277 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-11-23T15:24:52,277 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. 2024-11-23T15:24:52,278 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. as already flushing 2024-11-23T15:24:52,278 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. 2024-11-23T15:24:52,278 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:24:52,278 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:24:52,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:24:52,430 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:24:52,431 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-11-23T15:24:52,431 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. 2024-11-23T15:24:52,431 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. as already flushing 2024-11-23T15:24:52,431 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. 2024-11-23T15:24:52,431 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:24:52,431 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:24:52,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:24:52,435 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:52,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34024 deadline: 1732375552434, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:52,436 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:52,436 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:52,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33992 deadline: 1732375552436, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:52,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34008 deadline: 1732375552436, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:52,568 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=44, memsize=55.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/A/ce3a025a03394b8f879fe2c36f1aa7d2 2024-11-23T15:24:52,578 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/B/70971355dde546e3844b4abf11c04472 is 50, key is test_row_0/B:col10/1732375490951/Put/seqid=0 2024-11-23T15:24:52,583 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:24:52,584 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-11-23T15:24:52,584 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. 
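The RegionTooBusyException warnings above come from HRegion.checkResources, which rejects new mutations once the region's memstore exceeds its blocking limit (512.0 K here) until the flush drains it. The stock client already retries this internally; the sketch below only makes an explicit backoff visible and assumes the standard 2.x client API, with row, family, and qualifier names copied from the log for illustration.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BackoffWriterSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
            long backoffMs = 100;
            for (int attempt = 0; attempt < 5; attempt++) {
                try {
                    table.put(put);   // the client may also retry this internally
                    return;
                } catch (RegionTooBusyException e) {
                    // Memstore is over its blocking limit; wait for the flush to
                    // catch up before trying again.
                    Thread.sleep(backoffMs);
                    backoffMs *= 2;
                }
            }
            throw new IOException("region stayed too busy after retries");
        }
    }
}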
2024-11-23T15:24:52,584 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. as already flushing 2024-11-23T15:24:52,584 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. 2024-11-23T15:24:52,584 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:24:52,584 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
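The flushes of family A in this log go through mob.DefaultMobStoreFlusher and land under the mobdir path, which indicates the family is MOB-enabled: cells above the MOB threshold are written to separate MOB files and renamed into the mob directory (the earlier HMobStore "FLUSH Renaming" entry). A sketch of declaring such a family is below; it covers only family A, and the threshold value is an assumed illustration, not taken from this run.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class MobFamilySketch {
    public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            // MOB-enabled family: values larger than the threshold are stored as
            // MOB files, so its flush is handled by DefaultMobStoreFlusher.
            ColumnFamilyDescriptor mobFamily = ColumnFamilyDescriptorBuilder
                .newBuilder(Bytes.toBytes("A"))
                .setMobEnabled(true)
                .setMobThreshold(100L)   // bytes; illustrative value only
                .build();
            admin.createTable(TableDescriptorBuilder
                .newBuilder(TableName.valueOf("TestAcidGuarantees"))
                .setColumnFamily(mobFamily)
                .build());
        }
    }
}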
2024-11-23T15:24:52,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
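The recurring "Over memstore limit=512.0 K" rejections are governed by the region's blocking memstore size, which in the stock configuration is the memstore flush size multiplied by the block multiplier; a 512 K limit implies a deliberately small flush size in this test setup. The exact values used by this run are not in the excerpt, so the numbers below are assumptions chosen only so that the product matches the observed 512 K limit.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreLimitSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Flush a region's memstore once it reaches 128 KB (assumed value).
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
        // Block new updates once the memstore reaches flush.size * multiplier,
        // i.e. 512 KB here, matching the rejections in this log.
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
        long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
            * conf.getInt("hbase.hregion.memstore.block.multiplier", 0);
        System.out.println("blocking limit = " + blockingLimit / 1024 + " KB");
    }
}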
2024-11-23T15:24:52,592 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073741956_1132 (size=12001) 2024-11-23T15:24:52,659 DEBUG [BootstrapNodeManager {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-23T15:24:52,660 INFO [RS-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44690, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-11-23T15:24:52,737 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:24:52,737 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-11-23T15:24:52,738 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. 2024-11-23T15:24:52,738 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. as already flushing 2024-11-23T15:24:52,738 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. 2024-11-23T15:24:52,738 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:24:52,738 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:24:52,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:24:52,740 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:52,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34024 deadline: 1732375552739, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:52,741 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:52,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33992 deadline: 1732375552740, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:52,741 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:52,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34008 deadline: 1732375552741, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:52,891 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:24:52,892 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-11-23T15:24:52,892 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. 
2024-11-23T15:24:52,892 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. as already flushing 2024-11-23T15:24:52,892 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. 2024-11-23T15:24:52,892 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:24:52,892 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
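[editor's note] The pattern above ("NOT flushing ... as already flushing" followed by an IOException reported back to the master) suggests a simple guard: a new flush request is rejected while a flush is already in progress, so the caller retries later. Below is a simplified sketch of such a guard, written under that assumption; it is not HBase's FlushRegionCallable or HRegion implementation.

    // Simplified sketch (not HBase code): reject a flush request if one is already running.
    import java.io.IOException;
    import java.util.concurrent.atomic.AtomicBoolean;

    final class RegionFlushGuard {
        private final AtomicBoolean flushing = new AtomicBoolean(false);

        // Returns normally only if this call actually performed the flush.
        void flushOnce(Runnable doFlush) throws IOException {
            if (!flushing.compareAndSet(false, true)) {
                // Mirrors the logged failure path: another flush already holds the region.
                throw new IOException("Unable to complete flush: already flushing");
            }
            try {
                doFlush.run();          // write the memstore snapshot out as store files
            } finally {
                flushing.set(false);    // allow the next flush request
            }
        }
    }

Rejecting rather than queuing keeps the region server side stateless here; the retry responsibility stays with the master, which is consistent with the repeated dispatch entries in this log.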
2024-11-23T15:24:52,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T15:24:52,993 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=44 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/B/70971355dde546e3844b4abf11c04472 2024-11-23T15:24:52,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-11-23T15:24:53,006 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/C/4b8f78c184ec45e3bd5bcf1001c0dbd5 is 50, key is test_row_0/C:col10/1732375490951/Put/seqid=0 2024-11-23T15:24:53,032 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-23T15:24:53,045 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:24:53,045 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-11-23T15:24:53,045 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. 2024-11-23T15:24:53,046 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. as already flushing 2024-11-23T15:24:53,046 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. 2024-11-23T15:24:53,046 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T15:24:53,046 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:24:53,047 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073741957_1133 (size=12001) 2024-11-23T15:24:53,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:24:53,124 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:53,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33998 deadline: 1732375553123, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:53,125 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:53,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33976 deadline: 1732375553124, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:53,198 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:24:53,199 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-11-23T15:24:53,199 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. 2024-11-23T15:24:53,199 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. as already flushing 2024-11-23T15:24:53,199 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. 2024-11-23T15:24:53,199 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T15:24:53,199 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:24:53,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:24:53,243 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:53,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34024 deadline: 1732375553242, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:53,246 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:53,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34008 deadline: 1732375553246, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:53,247 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:53,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33992 deadline: 1732375553246, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:53,354 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:24:53,354 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-11-23T15:24:53,354 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. 
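[editor's note] The repeated RegionTooBusyException warnings above are the region pushing back on writers while its memstore is over the blocking limit. A minimal client-side sketch follows, assuming the standard HBase 2.x client API; the table, row, family and qualifier values are taken from this test for illustration, and the stock HBase client already retries RegionTooBusyException internally, so the explicit backoff loop here is only to make the behavior visible.

    // Client-side sketch: retry a put with exponential backoff when the region is busy.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public final class BackoffPut {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
                Put put = new Put(Bytes.toBytes("test_row_0"));
                put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("v"));

                long backoffMs = 100;
                for (int attempt = 1; attempt <= 10; attempt++) {
                    try {
                        table.put(put);
                        return;                                  // accepted by the region server
                    } catch (RegionTooBusyException busy) {
                        // Same condition as the "Over memstore limit" warnings above:
                        // the region blocks writes until its memstore has been flushed.
                        Thread.sleep(backoffMs);
                        backoffMs = Math.min(backoffMs * 2, 5_000);  // exponential backoff, capped
                    }
                }
                throw new java.io.IOException("region still busy after 10 attempts");
            }
        }
    }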
2024-11-23T15:24:53,355 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. as already flushing 2024-11-23T15:24:53,355 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. 2024-11-23T15:24:53,355 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:24:53,355 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T15:24:53,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
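[editor's note] The "Over memstore limit=512.0 K" warnings indicate that this test appears to configure a very small blocking threshold for the region's memstore (in stock HBase the threshold is normally derived from hbase.hregion.memstore.flush.size and hbase.hregion.memstore.block.multiplier). The sketch below shows the admission check implied by those warnings in simplified form; it is not the actual HRegion.checkResources implementation.

    // Simplified sketch of a memstore admission check; not HBase internals.
    import java.util.concurrent.atomic.AtomicLong;
    import org.apache.hadoop.hbase.RegionTooBusyException;

    final class MemStoreAdmission {
        private final AtomicLong memStoreSizeBytes = new AtomicLong();
        private final long blockingThresholdBytes;

        MemStoreAdmission(long blockingThresholdBytes) {
            this.blockingThresholdBytes = blockingThresholdBytes;   // e.g. 512 * 1024 in this test
        }

        // Called before applying a mutation; throws if the region should push back on writers.
        void checkResources() throws RegionTooBusyException {
            if (memStoreSizeBytes.get() > blockingThresholdBytes) {
                throw new RegionTooBusyException(
                    "Over memstore limit=" + (blockingThresholdBytes / 1024) + " K");
            }
        }

        void accountWrite(long bytes) { memStoreSizeBytes.addAndGet(bytes); }
        void accountFlush(long bytes) { memStoreSizeBytes.addAndGet(-bytes); }
    }

Once the flush recorded later in the log completes and the memstore size drops back under the threshold, the same writers stop receiving RegionTooBusyException.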
2024-11-23T15:24:53,448 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=44 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/C/4b8f78c184ec45e3bd5bcf1001c0dbd5 2024-11-23T15:24:53,459 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/A/ce3a025a03394b8f879fe2c36f1aa7d2 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/A/ce3a025a03394b8f879fe2c36f1aa7d2 2024-11-23T15:24:53,467 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/A/ce3a025a03394b8f879fe2c36f1aa7d2, entries=250, sequenceid=44, filesize=47.0 K 2024-11-23T15:24:53,468 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/B/70971355dde546e3844b4abf11c04472 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/B/70971355dde546e3844b4abf11c04472 2024-11-23T15:24:53,475 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/B/70971355dde546e3844b4abf11c04472, entries=150, sequenceid=44, filesize=11.7 K 2024-11-23T15:24:53,477 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/C/4b8f78c184ec45e3bd5bcf1001c0dbd5 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/C/4b8f78c184ec45e3bd5bcf1001c0dbd5 2024-11-23T15:24:53,485 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/C/4b8f78c184ec45e3bd5bcf1001c0dbd5, entries=150, sequenceid=44, filesize=11.7 K 2024-11-23T15:24:53,486 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~167.72 KB/171750, heapSize ~440.16 KB/450720, currentSize=33.54 KB/34350 for f0440d31fa3d850bbc6f1938601d069f in 1387ms, sequenceid=44, compaction requested=false 2024-11-23T15:24:53,486 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for f0440d31fa3d850bbc6f1938601d069f: 2024-11-23T15:24:53,507 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:24:53,508 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, 
pid=48 2024-11-23T15:24:53,508 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. 2024-11-23T15:24:53,508 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2837): Flushing f0440d31fa3d850bbc6f1938601d069f 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-11-23T15:24:53,509 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f0440d31fa3d850bbc6f1938601d069f, store=A 2024-11-23T15:24:53,509 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:24:53,509 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f0440d31fa3d850bbc6f1938601d069f, store=B 2024-11-23T15:24:53,509 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:24:53,509 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f0440d31fa3d850bbc6f1938601d069f, store=C 2024-11-23T15:24:53,509 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:24:53,518 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112311f893ffd172452985402b7efb126a21_f0440d31fa3d850bbc6f1938601d069f is 50, key is test_row_0/A:col10/1732375492107/Put/seqid=0 2024-11-23T15:24:53,531 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073741958_1134 (size=12154) 2024-11-23T15:24:53,933 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:53,939 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112311f893ffd172452985402b7efb126a21_f0440d31fa3d850bbc6f1938601d069f to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112311f893ffd172452985402b7efb126a21_f0440d31fa3d850bbc6f1938601d069f 2024-11-23T15:24:53,940 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] mob.DefaultMobStoreFlusher(263): Flush store file: 
hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/A/2021aa193efb4b408d3c101c8546e417, store: [table=TestAcidGuarantees family=A region=f0440d31fa3d850bbc6f1938601d069f] 2024-11-23T15:24:53,941 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/A/2021aa193efb4b408d3c101c8546e417 is 175, key is test_row_0/A:col10/1732375492107/Put/seqid=0 2024-11-23T15:24:53,951 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073741959_1135 (size=30955) 2024-11-23T15:24:54,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(8581): Flush requested on f0440d31fa3d850bbc6f1938601d069f 2024-11-23T15:24:54,255 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. as already flushing 2024-11-23T15:24:54,297 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:54,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34008 deadline: 1732375554294, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:54,299 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:54,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34024 deadline: 1732375554297, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:54,299 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:54,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33992 deadline: 1732375554297, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:54,352 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=52, memsize=11.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/A/2021aa193efb4b408d3c101c8546e417 2024-11-23T15:24:54,364 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/B/555d6e44cd5b4de591f446b830b9e552 is 50, key is test_row_0/B:col10/1732375492107/Put/seqid=0 2024-11-23T15:24:54,368 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073741960_1136 (size=12001) 2024-11-23T15:24:54,401 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:54,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34008 deadline: 1732375554398, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:54,401 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:54,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34024 deadline: 1732375554400, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:54,401 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:54,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33992 deadline: 1732375554400, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:54,602 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:54,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34008 deadline: 1732375554602, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:54,605 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:54,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34024 deadline: 1732375554603, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:54,606 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:54,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33992 deadline: 1732375554604, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:54,769 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=52 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/B/555d6e44cd5b4de591f446b830b9e552 2024-11-23T15:24:54,779 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/C/7e3396e9ca7440e7a5daff27dd1714dd is 50, key is test_row_0/C:col10/1732375492107/Put/seqid=0 2024-11-23T15:24:54,785 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073741961_1137 (size=12001) 2024-11-23T15:24:54,904 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:54,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34008 deadline: 1732375554904, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:54,910 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:54,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34024 deadline: 1732375554906, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:54,911 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:54,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33992 deadline: 1732375554909, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:54,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-11-23T15:24:55,141 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:55,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33976 deadline: 1732375555138, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:55,142 DEBUG [Thread-628 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4162 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f., hostname=6a36843bf905,33811,1732375456985, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-23T15:24:55,146 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:55,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33998 deadline: 1732375555144, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:55,147 DEBUG [Thread-624 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4167 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f., hostname=6a36843bf905,33811,1732375456985, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-23T15:24:55,186 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=52 (bloomFilter=true), 
to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/C/7e3396e9ca7440e7a5daff27dd1714dd 2024-11-23T15:24:55,193 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/A/2021aa193efb4b408d3c101c8546e417 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/A/2021aa193efb4b408d3c101c8546e417 2024-11-23T15:24:55,198 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/A/2021aa193efb4b408d3c101c8546e417, entries=150, sequenceid=52, filesize=30.2 K 2024-11-23T15:24:55,199 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/B/555d6e44cd5b4de591f446b830b9e552 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/B/555d6e44cd5b4de591f446b830b9e552 2024-11-23T15:24:55,205 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/B/555d6e44cd5b4de591f446b830b9e552, entries=150, sequenceid=52, filesize=11.7 K 2024-11-23T15:24:55,206 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/C/7e3396e9ca7440e7a5daff27dd1714dd as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/C/7e3396e9ca7440e7a5daff27dd1714dd 2024-11-23T15:24:55,211 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/C/7e3396e9ca7440e7a5daff27dd1714dd, entries=150, sequenceid=52, filesize=11.7 K 2024-11-23T15:24:55,212 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=174.43 KB/178620 for f0440d31fa3d850bbc6f1938601d069f in 1703ms, sequenceid=52, compaction requested=true 2024-11-23T15:24:55,212 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2538): Flush status journal for f0440d31fa3d850bbc6f1938601d069f: 
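[Editor's note, illustrative aside] The repeated RegionTooBusyException entries above come from HRegion.checkResources() rejecting writes while the region's memstore sits above its blocking limit; on the server side that limit is derived from hbase.hregion.memstore.flush.size and hbase.hregion.memstore.block.multiplier, and the 512.0 K figure in this run looks like a test-specific override rather than a default. On the client side, RpcRetryingCallerImpl absorbs these rejections and retries the Mutate call, which is what the "tries=6, retries=16" lines record; the exception only reaches application code once the configured retries are exhausted. A minimal client-side sketch of that interaction, assuming a plain HBase 2.x client and reusing the table, family, and row names visible in the log (class name, value payload, and the retry/pause numbers are illustrative, not taken from the test), might look like:

    import java.io.IOException;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class TooBusyWriterSketch {
      public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        // Client-side retry knobs behind the RpcRetryingCallerImpl behaviour seen in the
        // log ("tries=6, retries=16, ..."); these particular values are illustrative.
        conf.setInt("hbase.client.retries.number", 16);
        conf.setLong("hbase.client.pause", 100);

        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_0"));
          put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          try {
            // Table.put() retries RegionTooBusyException internally; it only surfaces
            // here after all configured retries have been exhausted.
            table.put(put);
          } catch (RegionTooBusyException e) {
            // The region's memstore is still above its blocking limit
            // (flush.size x block.multiplier on the server); back off and let the
            // in-flight flush, like the one logged above, drain the memstore.
            System.err.println("Region still too busy after retries: " + e.getMessage());
          }
        }
      }
    }

This is a sketch under the stated assumptions, not the AcidGuaranteesTestTool writer itself; the actual test drives writes through AcidGuaranteesTestTool$AtomicityWriter as shown in the client stack traces above.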
2024-11-23T15:24:55,212 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. 2024-11-23T15:24:55,212 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=48 2024-11-23T15:24:55,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4106): Remote procedure done, pid=48 2024-11-23T15:24:55,214 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=48, resume processing ppid=47 2024-11-23T15:24:55,215 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=48, ppid=47, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 4.3200 sec 2024-11-23T15:24:55,216 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=47, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=47, table=TestAcidGuarantees in 4.3250 sec 2024-11-23T15:24:55,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(8581): Flush requested on f0440d31fa3d850bbc6f1938601d069f 2024-11-23T15:24:55,409 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing f0440d31fa3d850bbc6f1938601d069f 3/3 column families, dataSize=181.14 KB heapSize=475.36 KB 2024-11-23T15:24:55,409 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f0440d31fa3d850bbc6f1938601d069f, store=A 2024-11-23T15:24:55,409 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:24:55,409 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f0440d31fa3d850bbc6f1938601d069f, store=B 2024-11-23T15:24:55,409 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:24:55,409 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f0440d31fa3d850bbc6f1938601d069f, store=C 2024-11-23T15:24:55,409 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:24:55,416 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:55,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33992 deadline: 1732375555416, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:55,417 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:55,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34008 deadline: 1732375555416, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:55,417 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:55,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34024 deadline: 1732375555417, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:55,419 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112311b55d67cb6a48afa4de98197b5fa333_f0440d31fa3d850bbc6f1938601d069f is 50, key is test_row_0/A:col10/1732375494292/Put/seqid=0 2024-11-23T15:24:55,431 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073741962_1138 (size=14594) 2024-11-23T15:24:55,432 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:55,437 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112311b55d67cb6a48afa4de98197b5fa333_f0440d31fa3d850bbc6f1938601d069f to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112311b55d67cb6a48afa4de98197b5fa333_f0440d31fa3d850bbc6f1938601d069f 2024-11-23T15:24:55,439 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/A/d0d39c0b2a5a424299056198090a18a1, store: [table=TestAcidGuarantees family=A region=f0440d31fa3d850bbc6f1938601d069f] 2024-11-23T15:24:55,439 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/A/d0d39c0b2a5a424299056198090a18a1 is 175, key is test_row_0/A:col10/1732375494292/Put/seqid=0 2024-11-23T15:24:55,444 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:33485 is added to blk_1073741963_1139 (size=39549) 2024-11-23T15:24:55,519 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:55,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34008 deadline: 1732375555518, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:55,520 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:55,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34024 deadline: 1732375555519, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:55,721 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:55,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34024 deadline: 1732375555721, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:55,723 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:55,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34008 deadline: 1732375555722, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:55,845 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=82, memsize=60.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/A/d0d39c0b2a5a424299056198090a18a1 2024-11-23T15:24:55,854 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/B/deff05ae80404b0c98211e57a668fa35 is 50, key is test_row_0/B:col10/1732375494292/Put/seqid=0 2024-11-23T15:24:55,858 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073741964_1140 (size=12001) 2024-11-23T15:24:56,026 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:56,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34024 deadline: 1732375556025, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:56,029 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:56,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34008 deadline: 1732375556027, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:56,259 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=60.38 KB at sequenceid=82 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/B/deff05ae80404b0c98211e57a668fa35 2024-11-23T15:24:56,268 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/C/7fb6568336694dcdbec250d14a3e5478 is 50, key is test_row_0/C:col10/1732375494292/Put/seqid=0 2024-11-23T15:24:56,273 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073741965_1141 (size=12001) 2024-11-23T15:24:56,274 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=60.38 KB at sequenceid=82 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/C/7fb6568336694dcdbec250d14a3e5478 2024-11-23T15:24:56,279 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/A/d0d39c0b2a5a424299056198090a18a1 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/A/d0d39c0b2a5a424299056198090a18a1 2024-11-23T15:24:56,286 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/A/d0d39c0b2a5a424299056198090a18a1, entries=200, sequenceid=82, filesize=38.6 K 2024-11-23T15:24:56,287 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/B/deff05ae80404b0c98211e57a668fa35 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/B/deff05ae80404b0c98211e57a668fa35 2024-11-23T15:24:56,288 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,291 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/B/deff05ae80404b0c98211e57a668fa35, entries=150, sequenceid=82, filesize=11.7 K 2024-11-23T15:24:56,292 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/C/7fb6568336694dcdbec250d14a3e5478 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/C/7fb6568336694dcdbec250d14a3e5478 2024-11-23T15:24:56,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,297 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/C/7fb6568336694dcdbec250d14a3e5478, entries=150, sequenceid=82, filesize=11.7 K 2024-11-23T15:24:56,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,298 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~181.14 KB/185490, heapSize ~475.31 KB/486720, currentSize=20.13 
KB/20610 for f0440d31fa3d850bbc6f1938601d069f in 889ms, sequenceid=82, compaction requested=true 2024-11-23T15:24:56,298 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for f0440d31fa3d850bbc6f1938601d069f: 2024-11-23T15:24:56,299 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f0440d31fa3d850bbc6f1938601d069f:A, priority=-2147483648, current under compaction store size is 1 2024-11-23T15:24:56,299 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T15:24:56,299 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-23T15:24:56,299 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f0440d31fa3d850bbc6f1938601d069f:B, priority=-2147483648, current under compaction store size is 2 2024-11-23T15:24:56,299 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T15:24:56,299 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f0440d31fa3d850bbc6f1938601d069f:C, priority=-2147483648, current under compaction store size is 3 2024-11-23T15:24:56,299 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-23T15:24:56,299 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T15:24:56,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,301 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48004 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-23T15:24:56,301 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1540): f0440d31fa3d850bbc6f1938601d069f/B is initiating minor compaction (all files) 2024-11-23T15:24:56,301 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f0440d31fa3d850bbc6f1938601d069f/B in 
TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. 2024-11-23T15:24:56,301 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/B/5d215038b9f243ae80156d480a0f0a79, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/B/70971355dde546e3844b4abf11c04472, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/B/555d6e44cd5b4de591f446b830b9e552, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/B/deff05ae80404b0c98211e57a668fa35] into tmpdir=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp, totalSize=46.9 K 2024-11-23T15:24:56,302 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 149598 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-23T15:24:56,302 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HStore(1540): f0440d31fa3d850bbc6f1938601d069f/A is initiating minor compaction (all files) 2024-11-23T15:24:56,302 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f0440d31fa3d850bbc6f1938601d069f/A in TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. 2024-11-23T15:24:56,302 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/A/12b8eef63a044b328496d4c655ef2418, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/A/ce3a025a03394b8f879fe2c36f1aa7d2, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/A/2021aa193efb4b408d3c101c8546e417, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/A/d0d39c0b2a5a424299056198090a18a1] into tmpdir=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp, totalSize=146.1 K 2024-11-23T15:24:56,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,302 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. 
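The entries above capture the write-pressure cycle this test exercises: puts against region f0440d31fa3d850bbc6f1938601d069f are rejected with RegionTooBusyException once the memstore passes its blocking limit (512.0 K in this test), the MemStoreFlusher then drains ~181 KB at sequenceid=82 into new A/B/C store files, and with four eligible files per store the ExploringCompactionPolicy queues minor compactions (family A additionally goes through the MOB-aware DefaultMobStoreCompactor). The sketch below is only a simplified, self-contained model of that control flow, not the HBase regionserver code: the class MemstorePressureSketch, its fields, and the 128 K flush trigger are invented for illustration, while the 512 K blocking limit and the 4-file compaction trigger are taken from the values visible in the log.

// Illustrative model of the memstore blocking / flush / compaction-trigger cycle
// seen in the log above. All class, field, and method names and the helper logic
// are assumptions made for this sketch; it is not the HBase regionserver implementation.
import java.util.ArrayList;
import java.util.List;

public class MemstorePressureSketch {

    // Stand-in for org.apache.hadoop.hbase.RegionTooBusyException.
    static class RegionTooBusy extends RuntimeException {
        RegionTooBusy(String msg) { super(msg); }
    }

    static final long BLOCKING_LIMIT_BYTES = 512 * 1024; // "Over memstore limit=512.0 K" in the log
    static final long FLUSH_TRIGGER_BYTES  = 128 * 1024; // assumed flush trigger for the sketch
    static final int  COMPACTION_MIN_FILES = 4;          // 4 eligible files queued a compaction above

    long memstoreBytes = 0;
    boolean flushRequested = false;
    final List<Long> storeFiles = new ArrayList<>();

    // Reject writes once the memstore is over its blocking limit, otherwise
    // accept the write and ask for a flush when the flush trigger is reached.
    void put(long cellBytes) {
        if (memstoreBytes >= BLOCKING_LIMIT_BYTES) {
            throw new RegionTooBusy("Over memstore limit=" + (BLOCKING_LIMIT_BYTES / 1024) + " K");
        }
        memstoreBytes += cellBytes;
        if (memstoreBytes >= FLUSH_TRIGGER_BYTES) {
            flushRequested = true; // the real flush runs asynchronously (MemStoreFlusher.0 above)
        }
    }

    // Stand-in for the background flusher: drain the memstore into a new store file,
    // then check whether enough files have piled up to request a minor compaction.
    void runFlusher() {
        if (!flushRequested) return;
        storeFiles.add(memstoreBytes);
        memstoreBytes = 0;
        flushRequested = false;
        if (storeFiles.size() >= COMPACTION_MIN_FILES) {
            long total = storeFiles.stream().mapToLong(Long::longValue).sum();
            storeFiles.clear();
            storeFiles.add(total); // minor compaction rewrites the selected files into one
            System.out.println("compaction: rewrote " + COMPACTION_MIN_FILES + " files, " + total + " bytes");
        }
    }

    public static void main(String[] args) {
        MemstorePressureSketch region = new MemstorePressureSketch();
        for (int round = 0; round < 5; round++) {
            try {
                for (int i = 0; i < 20; i++) {
                    region.put(40 * 1024); // 40 K per write; the flusher lags behind
                }
            } catch (RegionTooBusy e) {
                System.out.println("write rejected: " + e.getMessage());
            }
            region.runFlusher(); // flusher catches up, writes can resume next round
        }
    }
}

Running the sketch prints one rejected write per round until the simulated flusher drains the memstore, and a single compaction message once four files have accumulated, mirroring the WARN / flush / compaction ordering recorded above.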
2024-11-23T15:24:56,302 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. files: [hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/A/12b8eef63a044b328496d4c655ef2418, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/A/ce3a025a03394b8f879fe2c36f1aa7d2, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/A/2021aa193efb4b408d3c101c8546e417, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/A/d0d39c0b2a5a424299056198090a18a1] 2024-11-23T15:24:56,302 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting 5d215038b9f243ae80156d480a0f0a79, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=16, earliestPutTs=1732375490906 2024-11-23T15:24:56,303 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 12b8eef63a044b328496d4c655ef2418, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=16, earliestPutTs=1732375490906 2024-11-23T15:24:56,303 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting 70971355dde546e3844b4abf11c04472, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=44, earliestPutTs=1732375490951 2024-11-23T15:24:56,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,304 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting 555d6e44cd5b4de591f446b830b9e552, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1732375492102 2024-11-23T15:24:56,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,304 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.Compactor(224): Compacting ce3a025a03394b8f879fe2c36f1aa7d2, keycount=250, bloomtype=ROW, size=47.0 K, encoding=NONE, compression=NONE, seqNum=44, earliestPutTs=1732375490945 2024-11-23T15:24:56,305 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2021aa193efb4b408d3c101c8546e417, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1732375492102 2024-11-23T15:24:56,305 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting deff05ae80404b0c98211e57a668fa35, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=82, earliestPutTs=1732375494292 2024-11-23T15:24:56,305 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.Compactor(224): Compacting d0d39c0b2a5a424299056198090a18a1, keycount=200, bloomtype=ROW, size=38.6 K, encoding=NONE, 
compression=NONE, seqNum=82, earliestPutTs=1732375494292 2024-11-23T15:24:56,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,317 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=f0440d31fa3d850bbc6f1938601d069f] 2024-11-23T15:24:56,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating 
StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,320 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202411239474e1a20be04f098dfd99daddc6db50_f0440d31fa3d850bbc6f1938601d069f store=[table=TestAcidGuarantees family=A region=f0440d31fa3d850bbc6f1938601d069f] 2024-11-23T15:24:56,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,320 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): f0440d31fa3d850bbc6f1938601d069f#B#compaction#120 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-23T15:24:56,321 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/B/d17af16858704b1dbf3febd6cef27166 is 50, key is test_row_0/B:col10/1732375494292/Put/seqid=0 2024-11-23T15:24:56,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,324 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202411239474e1a20be04f098dfd99daddc6db50_f0440d31fa3d850bbc6f1938601d069f, store=[table=TestAcidGuarantees family=A region=f0440d31fa3d850bbc6f1938601d069f] 2024-11-23T15:24:56,324 DEBUG 
[RS:0;6a36843bf905:33811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411239474e1a20be04f098dfd99daddc6db50_f0440d31fa3d850bbc6f1938601d069f because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=f0440d31fa3d850bbc6f1938601d069f] 2024-11-23T15:24:56,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,350 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073741966_1142 (size=12139) 2024-11-23T15:24:56,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,354 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073741967_1143 (size=4469) 2024-11-23T15:24:56,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,356 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): f0440d31fa3d850bbc6f1938601d069f#A#compaction#121 average throughput is 0.63 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T15:24:56,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,358 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/A/e64b29092e7246b3b1d2fc52954c57b4 is 175, key is test_row_0/A:col10/1732375494292/Put/seqid=0 2024-11-23T15:24:56,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,363 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,374 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,380 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,382 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073741968_1144 (size=31093) 2024-11-23T15:24:56,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(8581): Flush requested on f0440d31fa3d850bbc6f1938601d069f 2024-11-23T15:24:56,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,458 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing f0440d31fa3d850bbc6f1938601d069f 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-23T15:24:56,458 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f0440d31fa3d850bbc6f1938601d069f, store=A 2024-11-23T15:24:56,458 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:24:56,458 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f0440d31fa3d850bbc6f1938601d069f, store=B 2024-11-23T15:24:56,458 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:24:56,458 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f0440d31fa3d850bbc6f1938601d069f, store=C 2024-11-23T15:24:56,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,458 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:24:56,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,471 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411235bb0167a12cc4afba780d51ea690bec5_f0440d31fa3d850bbc6f1938601d069f is 50, key is test_row_0/A:col10/1732375496450/Put/seqid=0 2024-11-23T15:24:56,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,500 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073741969_1145 (size=21918) 2024-11-23T15:24:56,554 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:56,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34008 deadline: 1732375556550, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:56,554 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:56,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34024 deadline: 1732375556551, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:56,555 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:56,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33992 deadline: 1732375556552, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:56,655 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:56,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34008 deadline: 1732375556655, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:56,656 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:56,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34024 deadline: 1732375556655, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:56,657 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:56,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33992 deadline: 1732375556656, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:56,757 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/B/d17af16858704b1dbf3febd6cef27166 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/B/d17af16858704b1dbf3febd6cef27166 2024-11-23T15:24:56,766 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in f0440d31fa3d850bbc6f1938601d069f/B of f0440d31fa3d850bbc6f1938601d069f into d17af16858704b1dbf3febd6cef27166(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
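The RegionTooBusyException entries above show the region server rejecting Mutate calls with "Over memstore limit=512.0 K" while the flush and compactions of f0440d31fa3d850bbc6f1938601d069f are still in flight; the blocking limit here is presumably this test's small hbase.hregion.memstore.flush.size scaled by hbase.hregion.memstore.block.multiplier. The following is a minimal, hypothetical Java sketch of how a standalone client could back off and retry a put when it sees that exception. The table name matches the test, but the row key, column, value, attempt count, and backoff numbers are illustrative assumptions, and a real HBase client normally absorbs this condition through its own retry settings (hbase.client.retries.number, hbase.client.pause) rather than an explicit loop like this.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BackoffPutSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      // Row/column/value are illustrative, loosely modeled on the keys seen in the log.
      Put put = new Put(Bytes.toBytes("test_row_0"))
          .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long sleepMs = 100;                        // initial backoff, doubled on each busy reply
      for (int attempt = 1; attempt <= 10; attempt++) {
        try {
          table.put(put);                        // rejected while the memstore is over its
          return;                                // blocking limit, as in the log above
        } catch (RegionTooBusyException busy) {  // may also surface wrapped by the client's
          Thread.sleep(sleepMs);                 // own retry layer (RetriesExhausted*)
          sleepMs = Math.min(sleepMs * 2, 5_000);
        }
      }
      throw new IOException("region still too busy after 10 attempts");
    }
  }
}

Exponential backoff with a cap keeps the writer from hammering a region that is already over its blocking memstore size, which is exactly the condition the RPC handler threads report repeatedly in the entries above until the flush and compactions complete.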
2024-11-23T15:24:56,766 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f0440d31fa3d850bbc6f1938601d069f: 2024-11-23T15:24:56,766 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f., storeName=f0440d31fa3d850bbc6f1938601d069f/B, priority=12, startTime=1732375496299; duration=0sec 2024-11-23T15:24:56,766 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T15:24:56,766 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f0440d31fa3d850bbc6f1938601d069f:B 2024-11-23T15:24:56,766 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-23T15:24:56,767 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48004 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-23T15:24:56,767 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1540): f0440d31fa3d850bbc6f1938601d069f/C is initiating minor compaction (all files) 2024-11-23T15:24:56,768 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f0440d31fa3d850bbc6f1938601d069f/C in TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. 2024-11-23T15:24:56,768 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/C/0421148a5b2c4da2baf921cf3debb80f, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/C/4b8f78c184ec45e3bd5bcf1001c0dbd5, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/C/7e3396e9ca7440e7a5daff27dd1714dd, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/C/7fb6568336694dcdbec250d14a3e5478] into tmpdir=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp, totalSize=46.9 K 2024-11-23T15:24:56,768 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting 0421148a5b2c4da2baf921cf3debb80f, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=16, earliestPutTs=1732375490906 2024-11-23T15:24:56,769 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting 4b8f78c184ec45e3bd5bcf1001c0dbd5, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=44, earliestPutTs=1732375490951 2024-11-23T15:24:56,769 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting 7e3396e9ca7440e7a5daff27dd1714dd, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, 
compression=NONE, seqNum=52, earliestPutTs=1732375492102 2024-11-23T15:24:56,769 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting 7fb6568336694dcdbec250d14a3e5478, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=82, earliestPutTs=1732375494292 2024-11-23T15:24:56,779 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): f0440d31fa3d850bbc6f1938601d069f#C#compaction#123 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T15:24:56,780 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/C/fb95697ec3bf4f9aaa9581d694843c76 is 50, key is test_row_0/C:col10/1732375494292/Put/seqid=0 2024-11-23T15:24:56,785 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073741970_1146 (size=12139) 2024-11-23T15:24:56,788 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/A/e64b29092e7246b3b1d2fc52954c57b4 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/A/e64b29092e7246b3b1d2fc52954c57b4 2024-11-23T15:24:56,793 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in f0440d31fa3d850bbc6f1938601d069f/A of f0440d31fa3d850bbc6f1938601d069f into e64b29092e7246b3b1d2fc52954c57b4(size=30.4 K), total size for store is 30.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T15:24:56,793 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f0440d31fa3d850bbc6f1938601d069f: 2024-11-23T15:24:56,794 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f., storeName=f0440d31fa3d850bbc6f1938601d069f/A, priority=12, startTime=1732375496298; duration=0sec 2024-11-23T15:24:56,794 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T15:24:56,794 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f0440d31fa3d850bbc6f1938601d069f:A 2024-11-23T15:24:56,860 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:56,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34008 deadline: 1732375556858, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:56,861 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:56,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34024 deadline: 1732375556859, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:56,861 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:56,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33992 deadline: 1732375556859, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:56,900 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:56,905 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411235bb0167a12cc4afba780d51ea690bec5_f0440d31fa3d850bbc6f1938601d069f to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411235bb0167a12cc4afba780d51ea690bec5_f0440d31fa3d850bbc6f1938601d069f 2024-11-23T15:24:56,906 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/A/45820ca9b5ec4658b00b5f8bdff63770, store: [table=TestAcidGuarantees family=A region=f0440d31fa3d850bbc6f1938601d069f] 2024-11-23T15:24:56,907 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/A/45820ca9b5ec4658b00b5f8bdff63770 is 175, key is test_row_0/A:col10/1732375496450/Put/seqid=0 2024-11-23T15:24:56,915 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073741971_1147 (size=65323) 2024-11-23T15:24:56,916 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=93, memsize=17.9 
K, hasBloomFilter=true, into tmp file hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/A/45820ca9b5ec4658b00b5f8bdff63770 2024-11-23T15:24:56,925 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/B/298eb9a657564231815973713e64a870 is 50, key is test_row_0/B:col10/1732375496450/Put/seqid=0 2024-11-23T15:24:56,944 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073741972_1148 (size=12001) 2024-11-23T15:24:57,162 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:57,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34008 deadline: 1732375557162, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:57,164 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:57,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33992 deadline: 1732375557162, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:57,165 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:57,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34024 deadline: 1732375557163, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:57,192 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/C/fb95697ec3bf4f9aaa9581d694843c76 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/C/fb95697ec3bf4f9aaa9581d694843c76 2024-11-23T15:24:57,199 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in f0440d31fa3d850bbc6f1938601d069f/C of f0440d31fa3d850bbc6f1938601d069f into fb95697ec3bf4f9aaa9581d694843c76(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
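[Annotation, not part of the captured log] The WARN/DEBUG pairs around this point are the region server rejecting Mutate calls from HRegion.checkResources while the region's memstore is over its 512.0 K blocking limit ("Over memstore limit"); the client keeps retrying with backoff until a flush frees memstore space, as the RpcRetryingCallerImpl entry later in this log (tries=7, retries=16) shows. A minimal sketch of such a client write with the standard HBase 2.x client API, using the table and row names seen in this log but an otherwise hypothetical value and error handling; this is illustrative only, not the test tool's actual code:

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionPutExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // The client-side retrying caller retries RegionTooBusyException internally;
    // retries=16 matches what this log's RpcRetryingCallerImpl entry reports.
    conf.setInt("hbase.client.retries.number", 16);

    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      try {
        // Retries internally while the region reports "Over memstore limit";
        // succeeds once a flush brings the memstore below the blocking limit.
        table.put(put);
      } catch (IOException e) {
        // Retries exhausted: the root cause is typically RegionTooBusyException.
        System.err.println("put failed after retries: " + e);
      }
    }
  }
}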
2024-11-23T15:24:57,199 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f0440d31fa3d850bbc6f1938601d069f: 2024-11-23T15:24:57,199 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f., storeName=f0440d31fa3d850bbc6f1938601d069f/C, priority=12, startTime=1732375496299; duration=0sec 2024-11-23T15:24:57,199 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T15:24:57,200 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f0440d31fa3d850bbc6f1938601d069f:C 2024-11-23T15:24:57,345 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/B/298eb9a657564231815973713e64a870 2024-11-23T15:24:57,355 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/C/5ab12513226643d8b80903e69caca199 is 50, key is test_row_0/C:col10/1732375496450/Put/seqid=0 2024-11-23T15:24:57,360 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073741973_1149 (size=12001) 2024-11-23T15:24:57,665 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:57,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34008 deadline: 1732375557664, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:57,667 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:57,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34024 deadline: 1732375557666, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:57,668 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:57,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33992 deadline: 1732375557668, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:57,762 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/C/5ab12513226643d8b80903e69caca199 2024-11-23T15:24:57,767 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/A/45820ca9b5ec4658b00b5f8bdff63770 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/A/45820ca9b5ec4658b00b5f8bdff63770 2024-11-23T15:24:57,772 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/A/45820ca9b5ec4658b00b5f8bdff63770, entries=350, sequenceid=93, filesize=63.8 K 2024-11-23T15:24:57,774 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/B/298eb9a657564231815973713e64a870 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/B/298eb9a657564231815973713e64a870 2024-11-23T15:24:57,779 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/B/298eb9a657564231815973713e64a870, entries=150, sequenceid=93, filesize=11.7 K 2024-11-23T15:24:57,780 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/C/5ab12513226643d8b80903e69caca199 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/C/5ab12513226643d8b80903e69caca199 2024-11-23T15:24:57,785 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/C/5ab12513226643d8b80903e69caca199, entries=150, sequenceid=93, filesize=11.7 K 2024-11-23T15:24:57,786 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for f0440d31fa3d850bbc6f1938601d069f in 1328ms, sequenceid=93, compaction requested=false 2024-11-23T15:24:57,786 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for f0440d31fa3d850bbc6f1938601d069f: 2024-11-23T15:24:58,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(8581): Flush requested on f0440d31fa3d850bbc6f1938601d069f 2024-11-23T15:24:58,670 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing f0440d31fa3d850bbc6f1938601d069f 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-11-23T15:24:58,670 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f0440d31fa3d850bbc6f1938601d069f, store=A 2024-11-23T15:24:58,670 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:24:58,670 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f0440d31fa3d850bbc6f1938601d069f, store=B 2024-11-23T15:24:58,671 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:24:58,671 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f0440d31fa3d850bbc6f1938601d069f, store=C 2024-11-23T15:24:58,671 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:24:58,678 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411230a1b445a793d49f8aa8e4e4d4ab1757b_f0440d31fa3d850bbc6f1938601d069f is 50, key is test_row_0/A:col10/1732375496542/Put/seqid=0 2024-11-23T15:24:58,681 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:58,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34024 deadline: 1732375558680, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:58,681 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:58,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33992 deadline: 1732375558680, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:58,683 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:58,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34008 deadline: 1732375558681, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:58,685 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073741974_1150 (size=14594) 2024-11-23T15:24:58,686 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:24:58,691 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411230a1b445a793d49f8aa8e4e4d4ab1757b_f0440d31fa3d850bbc6f1938601d069f to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411230a1b445a793d49f8aa8e4e4d4ab1757b_f0440d31fa3d850bbc6f1938601d069f 2024-11-23T15:24:58,692 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/A/bd5f8701470d4df5b82a94b4f873deda, store: [table=TestAcidGuarantees family=A region=f0440d31fa3d850bbc6f1938601d069f] 2024-11-23T15:24:58,693 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/A/bd5f8701470d4df5b82a94b4f873deda is 175, key is test_row_0/A:col10/1732375496542/Put/seqid=0 2024-11-23T15:24:58,697 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073741975_1151 (size=39549) 2024-11-23T15:24:58,783 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:58,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34024 deadline: 1732375558782, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:58,783 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:58,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33992 deadline: 1732375558782, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:58,785 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:58,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34008 deadline: 1732375558784, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:58,988 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:58,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34024 deadline: 1732375558985, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:58,988 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:58,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33992 deadline: 1732375558986, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:58,989 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:58,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34008 deadline: 1732375558987, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:58,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-11-23T15:24:59,000 INFO [Thread-630 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 47 completed 2024-11-23T15:24:59,001 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-23T15:24:59,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] procedure2.ProcedureExecutor(1098): Stored pid=49, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=49, table=TestAcidGuarantees 2024-11-23T15:24:59,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-11-23T15:24:59,003 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=49, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=49, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-23T15:24:59,003 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=49, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=49, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-23T15:24:59,004 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=50, ppid=49, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-23T15:24:59,098 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=122, memsize=51.4 K, 
hasBloomFilter=true, into tmp file hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/A/bd5f8701470d4df5b82a94b4f873deda 2024-11-23T15:24:59,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-11-23T15:24:59,107 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/B/89574a63d5e74416949991dd137234a5 is 50, key is test_row_0/B:col10/1732375496542/Put/seqid=0 2024-11-23T15:24:59,113 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073741976_1152 (size=12001) 2024-11-23T15:24:59,154 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:59,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33976 deadline: 1732375559153, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:59,155 DEBUG [Thread-628 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8174 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f., hostname=6a36843bf905,33811,1732375456985, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-23T15:24:59,155 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:24:59,156 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-11-23T15:24:59,156 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. 
2024-11-23T15:24:59,156 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. as already flushing 2024-11-23T15:24:59,156 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. 2024-11-23T15:24:59,156 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:24:59,156 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T15:24:59,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:24:59,171 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:59,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33998 deadline: 1732375559169, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:59,171 DEBUG [Thread-624 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8191 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f., hostname=6a36843bf905,33811,1732375456985, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-23T15:24:59,290 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:59,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34008 deadline: 1732375559290, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:59,291 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:59,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34024 deadline: 1732375559290, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:59,293 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:59,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33992 deadline: 1732375559291, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:59,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-11-23T15:24:59,308 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:24:59,309 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-11-23T15:24:59,309 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. 2024-11-23T15:24:59,309 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. as already flushing 2024-11-23T15:24:59,309 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. 2024-11-23T15:24:59,309 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:24:59,310 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:24:59,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:24:59,461 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:24:59,462 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-11-23T15:24:59,462 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. 2024-11-23T15:24:59,462 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. as already flushing 2024-11-23T15:24:59,462 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. 2024-11-23T15:24:59,462 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:24:59,463 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:24:59,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:24:59,514 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=122 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/B/89574a63d5e74416949991dd137234a5 2024-11-23T15:24:59,522 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/C/bdf9429412824d6fa5a8ba6f98b89974 is 50, key is test_row_0/C:col10/1732375496542/Put/seqid=0 2024-11-23T15:24:59,526 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073741977_1153 (size=12001) 2024-11-23T15:24:59,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-11-23T15:24:59,615 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:24:59,615 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-11-23T15:24:59,615 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. 2024-11-23T15:24:59,615 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. 
as already flushing 2024-11-23T15:24:59,615 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. 2024-11-23T15:24:59,616 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:24:59,616 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:24:59,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:24:59,767 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:24:59,767 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-11-23T15:24:59,768 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. 2024-11-23T15:24:59,768 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. as already flushing 2024-11-23T15:24:59,768 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. 2024-11-23T15:24:59,768 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:24:59,768 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:24:59,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:24:59,794 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:59,794 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:59,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34024 deadline: 1732375559794, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:59,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33992 deadline: 1732375559794, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:59,796 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:24:59,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34008 deadline: 1732375559796, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 2024-11-23T15:24:59,920 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:24:59,921 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-11-23T15:24:59,921 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. 2024-11-23T15:24:59,921 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. as already flushing 2024-11-23T15:24:59,921 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. 2024-11-23T15:24:59,921 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T15:24:59,921 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:24:59,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:24:59,926 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=122 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/C/bdf9429412824d6fa5a8ba6f98b89974 2024-11-23T15:24:59,932 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/A/bd5f8701470d4df5b82a94b4f873deda as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/A/bd5f8701470d4df5b82a94b4f873deda 2024-11-23T15:24:59,937 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/A/bd5f8701470d4df5b82a94b4f873deda, entries=200, sequenceid=122, filesize=38.6 K 2024-11-23T15:24:59,938 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/B/89574a63d5e74416949991dd137234a5 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/B/89574a63d5e74416949991dd137234a5 2024-11-23T15:24:59,942 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/B/89574a63d5e74416949991dd137234a5, entries=150, sequenceid=122, filesize=11.7 K 2024-11-23T15:24:59,943 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/C/bdf9429412824d6fa5a8ba6f98b89974 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/C/bdf9429412824d6fa5a8ba6f98b89974 2024-11-23T15:24:59,948 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/C/bdf9429412824d6fa5a8ba6f98b89974, entries=150, sequenceid=122, filesize=11.7 K 2024-11-23T15:24:59,949 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for f0440d31fa3d850bbc6f1938601d069f in 1279ms, sequenceid=122, compaction requested=true 2024-11-23T15:24:59,949 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for f0440d31fa3d850bbc6f1938601d069f: 2024-11-23T15:24:59,949 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 
f0440d31fa3d850bbc6f1938601d069f:A, priority=-2147483648, current under compaction store size is 1 2024-11-23T15:24:59,949 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T15:24:59,949 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T15:24:59,949 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f0440d31fa3d850bbc6f1938601d069f:B, priority=-2147483648, current under compaction store size is 2 2024-11-23T15:24:59,950 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T15:24:59,950 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T15:24:59,950 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f0440d31fa3d850bbc6f1938601d069f:C, priority=-2147483648, current under compaction store size is 3 2024-11-23T15:24:59,950 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T15:24:59,952 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 135965 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T15:24:59,952 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36141 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T15:24:59,952 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HStore(1540): f0440d31fa3d850bbc6f1938601d069f/A is initiating minor compaction (all files) 2024-11-23T15:24:59,952 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1540): f0440d31fa3d850bbc6f1938601d069f/B is initiating minor compaction (all files) 2024-11-23T15:24:59,952 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f0440d31fa3d850bbc6f1938601d069f/B in TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. 2024-11-23T15:24:59,952 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f0440d31fa3d850bbc6f1938601d069f/A in TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. 
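The "Exploring compaction algorithm has selected 3 files ... with 1 in ratio" entries refer to the policy's size-ratio test: a candidate set is "in ratio" when no file is larger than the compaction ratio times the combined size of the other files in the set. The sketch below is a deliberate simplification of that check for illustration only (the real ExploringCompactionPolicy also weighs off-peak ratios, file-count limits, and scores across permutations); the 1.2 ratio is the documented default and the sizes only roughly match the A-store selection above.

import java.util.List;

final class CompactionRatioCheck {
  // Simplified size-ratio test: every file must be <= ratio * (total size of the other files).
  static boolean isInRatio(List<Long> fileSizes, double ratio) {
    long total = fileSizes.stream().mapToLong(Long::longValue).sum();
    for (long size : fileSizes) {
      if (size > (total - size) * ratio) {
        return false;
      }
    }
    return true;
  }

  public static void main(String[] args) {
    // Roughly the A-store candidates above: ~30.4 K, ~63.8 K, ~38.6 K.
    List<Long> storeFiles = List.of(31_130L, 65_331L, 39_526L);
    System.out.println(isInRatio(storeFiles, 1.2)); // true: the set is "in ratio"
  }
}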
2024-11-23T15:24:59,953 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/A/e64b29092e7246b3b1d2fc52954c57b4, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/A/45820ca9b5ec4658b00b5f8bdff63770, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/A/bd5f8701470d4df5b82a94b4f873deda] into tmpdir=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp, totalSize=132.8 K 2024-11-23T15:24:59,953 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. 2024-11-23T15:24:59,953 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. files: [hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/A/e64b29092e7246b3b1d2fc52954c57b4, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/A/45820ca9b5ec4658b00b5f8bdff63770, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/A/bd5f8701470d4df5b82a94b4f873deda] 2024-11-23T15:24:59,954 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/B/d17af16858704b1dbf3febd6cef27166, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/B/298eb9a657564231815973713e64a870, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/B/89574a63d5e74416949991dd137234a5] into tmpdir=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp, totalSize=35.3 K 2024-11-23T15:24:59,954 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting d17af16858704b1dbf3febd6cef27166, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=82, earliestPutTs=1732375494292 2024-11-23T15:24:59,954 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.Compactor(224): Compacting e64b29092e7246b3b1d2fc52954c57b4, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=82, earliestPutTs=1732375494292 2024-11-23T15:24:59,955 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting 
298eb9a657564231815973713e64a870, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=93, earliestPutTs=1732375496443 2024-11-23T15:24:59,955 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 45820ca9b5ec4658b00b5f8bdff63770, keycount=350, bloomtype=ROW, size=63.8 K, encoding=NONE, compression=NONE, seqNum=93, earliestPutTs=1732375495414 2024-11-23T15:24:59,955 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting 89574a63d5e74416949991dd137234a5, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=122, earliestPutTs=1732375496489 2024-11-23T15:24:59,956 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.Compactor(224): Compacting bd5f8701470d4df5b82a94b4f873deda, keycount=200, bloomtype=ROW, size=38.6 K, encoding=NONE, compression=NONE, seqNum=122, earliestPutTs=1732375496489 2024-11-23T15:24:59,964 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): f0440d31fa3d850bbc6f1938601d069f#B#compaction#129 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T15:24:59,964 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=f0440d31fa3d850bbc6f1938601d069f] 2024-11-23T15:24:59,965 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/B/71baec5dfa0a4c9489c3bfd1a9395675 is 50, key is test_row_0/B:col10/1732375496542/Put/seqid=0 2024-11-23T15:24:59,967 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202411235f0b0613ba93423f8abe5c0ef590c9a2_f0440d31fa3d850bbc6f1938601d069f store=[table=TestAcidGuarantees family=A region=f0440d31fa3d850bbc6f1938601d069f] 2024-11-23T15:24:59,969 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202411235f0b0613ba93423f8abe5c0ef590c9a2_f0440d31fa3d850bbc6f1938601d069f, store=[table=TestAcidGuarantees family=A region=f0440d31fa3d850bbc6f1938601d069f] 2024-11-23T15:24:59,970 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411235f0b0613ba93423f8abe5c0ef590c9a2_f0440d31fa3d850bbc6f1938601d069f because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=f0440d31fa3d850bbc6f1938601d069f] 2024-11-23T15:24:59,978 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073741978_1154 (size=12241) 2024-11-23T15:25:00,001 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073741979_1155 (size=4469) 2024-11-23T15:25:00,002 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
f0440d31fa3d850bbc6f1938601d069f#A#compaction#130 average throughput is 0.66 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T15:25:00,003 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/A/d66fac1d261c4dd9aaccdf5b66bfff4f is 175, key is test_row_0/A:col10/1732375496542/Put/seqid=0 2024-11-23T15:25:00,007 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073741980_1156 (size=31195) 2024-11-23T15:25:00,073 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:25:00,074 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-11-23T15:25:00,074 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. 2024-11-23T15:25:00,074 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2837): Flushing f0440d31fa3d850bbc6f1938601d069f 3/3 column families, dataSize=46.96 KB heapSize=123.80 KB 2024-11-23T15:25:00,075 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f0440d31fa3d850bbc6f1938601d069f, store=A 2024-11-23T15:25:00,075 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:25:00,075 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f0440d31fa3d850bbc6f1938601d069f, store=B 2024-11-23T15:25:00,075 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:25:00,075 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f0440d31fa3d850bbc6f1938601d069f, store=C 2024-11-23T15:25:00,075 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:25:00,083 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241123c5ea1b46eea9485cafa810f90a9ec9f0_f0440d31fa3d850bbc6f1938601d069f is 50, key is test_row_0/A:col10/1732375498674/Put/seqid=0 2024-11-23T15:25:00,093 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073741981_1157 (size=12254) 2024-11-23T15:25:00,106 
DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-11-23T15:25:00,407 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/B/71baec5dfa0a4c9489c3bfd1a9395675 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/B/71baec5dfa0a4c9489c3bfd1a9395675 2024-11-23T15:25:00,419 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/A/d66fac1d261c4dd9aaccdf5b66bfff4f as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/A/d66fac1d261c4dd9aaccdf5b66bfff4f 2024-11-23T15:25:00,420 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in f0440d31fa3d850bbc6f1938601d069f/B of f0440d31fa3d850bbc6f1938601d069f into 71baec5dfa0a4c9489c3bfd1a9395675(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T15:25:00,420 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f0440d31fa3d850bbc6f1938601d069f: 2024-11-23T15:25:00,422 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f., storeName=f0440d31fa3d850bbc6f1938601d069f/B, priority=13, startTime=1732375499949; duration=0sec 2024-11-23T15:25:00,422 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T15:25:00,422 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f0440d31fa3d850bbc6f1938601d069f:B 2024-11-23T15:25:00,422 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T15:25:00,424 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36141 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T15:25:00,424 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1540): f0440d31fa3d850bbc6f1938601d069f/C is initiating minor compaction (all files) 2024-11-23T15:25:00,424 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f0440d31fa3d850bbc6f1938601d069f/C in TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. 
2024-11-23T15:25:00,424 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/C/fb95697ec3bf4f9aaa9581d694843c76, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/C/5ab12513226643d8b80903e69caca199, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/C/bdf9429412824d6fa5a8ba6f98b89974] into tmpdir=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp, totalSize=35.3 K 2024-11-23T15:25:00,425 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting fb95697ec3bf4f9aaa9581d694843c76, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=82, earliestPutTs=1732375494292 2024-11-23T15:25:00,425 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting 5ab12513226643d8b80903e69caca199, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=93, earliestPutTs=1732375496443 2024-11-23T15:25:00,426 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in f0440d31fa3d850bbc6f1938601d069f/A of f0440d31fa3d850bbc6f1938601d069f into d66fac1d261c4dd9aaccdf5b66bfff4f(size=30.5 K), total size for store is 30.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T15:25:00,426 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f0440d31fa3d850bbc6f1938601d069f: 2024-11-23T15:25:00,426 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f., storeName=f0440d31fa3d850bbc6f1938601d069f/A, priority=13, startTime=1732375499949; duration=0sec 2024-11-23T15:25:00,426 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T15:25:00,426 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f0440d31fa3d850bbc6f1938601d069f:A 2024-11-23T15:25:00,426 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting bdf9429412824d6fa5a8ba6f98b89974, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=122, earliestPutTs=1732375496489 2024-11-23T15:25:00,434 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): f0440d31fa3d850bbc6f1938601d069f#C#compaction#132 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T15:25:00,435 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/C/42f0fbf771194f1bb357f6514afaf4a6 is 50, key is test_row_0/C:col10/1732375496542/Put/seqid=0 2024-11-23T15:25:00,462 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073741982_1158 (size=12241) 2024-11-23T15:25:00,471 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/C/42f0fbf771194f1bb357f6514afaf4a6 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/C/42f0fbf771194f1bb357f6514afaf4a6 2024-11-23T15:25:00,479 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in f0440d31fa3d850bbc6f1938601d069f/C of f0440d31fa3d850bbc6f1938601d069f into 42f0fbf771194f1bb357f6514afaf4a6(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T15:25:00,479 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f0440d31fa3d850bbc6f1938601d069f: 2024-11-23T15:25:00,479 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f., storeName=f0440d31fa3d850bbc6f1938601d069f/C, priority=13, startTime=1732375499950; duration=0sec 2024-11-23T15:25:00,479 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T15:25:00,479 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f0440d31fa3d850bbc6f1938601d069f:C 2024-11-23T15:25:00,494 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:00,500 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241123c5ea1b46eea9485cafa810f90a9ec9f0_f0440d31fa3d850bbc6f1938601d069f to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123c5ea1b46eea9485cafa810f90a9ec9f0_f0440d31fa3d850bbc6f1938601d069f 2024-11-23T15:25:00,501 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] mob.DefaultMobStoreFlusher(263): Flush store file: 
hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/A/b405604ebba64d579afb9585a76e49b1, store: [table=TestAcidGuarantees family=A region=f0440d31fa3d850bbc6f1938601d069f] 2024-11-23T15:25:00,502 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/A/b405604ebba64d579afb9585a76e49b1 is 175, key is test_row_0/A:col10/1732375498674/Put/seqid=0 2024-11-23T15:25:00,507 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073741983_1159 (size=31055) 2024-11-23T15:25:00,508 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=132, memsize=15.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/A/b405604ebba64d579afb9585a76e49b1 2024-11-23T15:25:00,518 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/B/7d96ff53eabf47b9bf5ae7b90e4f8082 is 50, key is test_row_0/B:col10/1732375498674/Put/seqid=0 2024-11-23T15:25:00,539 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073741984_1160 (size=12101) 2024-11-23T15:25:00,800 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. as already flushing 2024-11-23T15:25:00,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(8581): Flush requested on f0440d31fa3d850bbc6f1938601d069f 2024-11-23T15:25:00,824 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:00,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34008 deadline: 1732375560822, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:00,824 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:00,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34024 deadline: 1732375560823, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:00,825 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:00,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33992 deadline: 1732375560824, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:00,925 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:00,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34008 deadline: 1732375560925, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:00,926 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:00,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34024 deadline: 1732375560926, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:00,926 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:00,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33992 deadline: 1732375560926, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:00,940 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=132 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/B/7d96ff53eabf47b9bf5ae7b90e4f8082 2024-11-23T15:25:00,949 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/C/3368444e635840d0ad653085e32d5434 is 50, key is test_row_0/C:col10/1732375498674/Put/seqid=0 2024-11-23T15:25:00,953 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073741985_1161 (size=12101) 2024-11-23T15:25:00,954 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=132 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/C/3368444e635840d0ad653085e32d5434 2024-11-23T15:25:00,959 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/A/b405604ebba64d579afb9585a76e49b1 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/A/b405604ebba64d579afb9585a76e49b1 2024-11-23T15:25:00,965 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/A/b405604ebba64d579afb9585a76e49b1, entries=150, sequenceid=132, filesize=30.3 K 2024-11-23T15:25:00,966 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/B/7d96ff53eabf47b9bf5ae7b90e4f8082 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/B/7d96ff53eabf47b9bf5ae7b90e4f8082 2024-11-23T15:25:00,973 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/B/7d96ff53eabf47b9bf5ae7b90e4f8082, entries=150, sequenceid=132, filesize=11.8 K 2024-11-23T15:25:00,975 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/C/3368444e635840d0ad653085e32d5434 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/C/3368444e635840d0ad653085e32d5434 2024-11-23T15:25:00,980 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/C/3368444e635840d0ad653085e32d5434, entries=150, sequenceid=132, filesize=11.8 K 2024-11-23T15:25:00,981 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(3040): Finished flush of dataSize ~46.96 KB/48090, heapSize ~123.75 KB/126720, currentSize=154.31 KB/158010 for f0440d31fa3d850bbc6f1938601d069f in 907ms, sequenceid=132, compaction requested=false 2024-11-23T15:25:00,981 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2538): Flush status journal for f0440d31fa3d850bbc6f1938601d069f: 2024-11-23T15:25:00,981 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. 
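The recurring "Over memstore limit=512.0 K" rejections alongside these small flushes reflect a deliberately tiny memstore configuration: writes to a region block once its memstore exceeds the flush size multiplied by the block multiplier. The sketch below shows how such a low blocking limit can be configured; the 128 KB flush size and multiplier of 4 are assumptions chosen only to reproduce a 512 K limit, not values read from this test's actual configuration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class TinyMemstoreConfig {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Flush a region's memstore once it reaches 128 KB (the production default is 128 MB).
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
    // Block new writes once the memstore passes flush.size * multiplier,
    // i.e. 4 * 128 KB = 512 KB -- the limit reported in the log above.
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
    long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0L)
        * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    System.out.println("Blocking limit (bytes): " + blockingLimit); // 524288 = 512.0 K
  }
}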
2024-11-23T15:25:00,981 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=50 2024-11-23T15:25:00,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4106): Remote procedure done, pid=50 2024-11-23T15:25:00,986 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=50, resume processing ppid=49 2024-11-23T15:25:00,986 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=50, ppid=49, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.9800 sec 2024-11-23T15:25:00,988 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=49, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=49, table=TestAcidGuarantees in 1.9850 sec 2024-11-23T15:25:01,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-11-23T15:25:01,107 INFO [Thread-630 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 49 completed 2024-11-23T15:25:01,108 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-23T15:25:01,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] procedure2.ProcedureExecutor(1098): Stored pid=51, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=51, table=TestAcidGuarantees 2024-11-23T15:25:01,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-11-23T15:25:01,110 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=51, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=51, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-23T15:25:01,110 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=51, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=51, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-23T15:25:01,111 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=52, ppid=51, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-23T15:25:01,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(8581): Flush requested on f0440d31fa3d850bbc6f1938601d069f 2024-11-23T15:25:01,129 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing f0440d31fa3d850bbc6f1938601d069f 3/3 column families, dataSize=161.02 KB heapSize=422.63 KB 2024-11-23T15:25:01,130 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f0440d31fa3d850bbc6f1938601d069f, store=A 2024-11-23T15:25:01,130 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:25:01,131 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f0440d31fa3d850bbc6f1938601d069f, store=B 2024-11-23T15:25:01,131 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 
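Each "Client=jenkins//172.17.0.2 flush TestAcidGuarantees" entry corresponds to the test driver requesting an explicit table flush, which the master executes as the FlushTableProcedure/FlushRegionProcedure pair tracked by the procIds above. For reference, a minimal sketch of issuing such a flush through the public Admin API follows; the connection setup is assumed and not taken from the test code.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Flush every region of the table; on the master this appears as a
      // FlushTableProcedure with one FlushRegionProcedure subprocedure per region.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}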
2024-11-23T15:25:01,131 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f0440d31fa3d850bbc6f1938601d069f, store=C 2024-11-23T15:25:01,131 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:25:01,140 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112315fecaf745ac409e9498f7d6d764c1f2_f0440d31fa3d850bbc6f1938601d069f is 50, key is test_row_0/A:col10/1732375501129/Put/seqid=0 2024-11-23T15:25:01,144 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073741986_1162 (size=14794) 2024-11-23T15:25:01,145 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:01,149 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112315fecaf745ac409e9498f7d6d764c1f2_f0440d31fa3d850bbc6f1938601d069f to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112315fecaf745ac409e9498f7d6d764c1f2_f0440d31fa3d850bbc6f1938601d069f 2024-11-23T15:25:01,150 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/A/a00091b266fb4242aea95ed99a1d4b33, store: [table=TestAcidGuarantees family=A region=f0440d31fa3d850bbc6f1938601d069f] 2024-11-23T15:25:01,150 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/A/a00091b266fb4242aea95ed99a1d4b33 is 175, key is test_row_0/A:col10/1732375501129/Put/seqid=0 2024-11-23T15:25:01,154 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073741987_1163 (size=39749) 2024-11-23T15:25:01,155 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:01,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33992 deadline: 1732375561148, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:01,155 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:01,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34024 deadline: 1732375561149, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:01,162 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:01,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34008 deadline: 1732375561160, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:01,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-11-23T15:25:01,257 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:01,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33992 deadline: 1732375561256, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:01,257 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:01,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34024 deadline: 1732375561256, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:01,262 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:25:01,263 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-11-23T15:25:01,263 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. 
2024-11-23T15:25:01,263 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. as already flushing 2024-11-23T15:25:01,263 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. 2024-11-23T15:25:01,263 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:25:01,263 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T15:25:01,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:25:01,264 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:01,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34008 deadline: 1732375561263, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:01,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-11-23T15:25:01,415 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:25:01,415 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-11-23T15:25:01,415 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. 2024-11-23T15:25:01,416 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. as already flushing 2024-11-23T15:25:01,416 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. 2024-11-23T15:25:01,416 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T15:25:01,416 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:25:01,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:25:01,460 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:01,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33992 deadline: 1732375561459, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:01,460 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:01,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34024 deadline: 1732375561459, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:01,466 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:01,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34008 deadline: 1732375561465, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:01,555 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=164, memsize=58.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/A/a00091b266fb4242aea95ed99a1d4b33 2024-11-23T15:25:01,563 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/B/b034885a5d8b4d64b0d180515b63f7a6 is 50, key is test_row_0/B:col10/1732375501129/Put/seqid=0 2024-11-23T15:25:01,567 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073741988_1164 (size=12151) 2024-11-23T15:25:01,568 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:25:01,568 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-11-23T15:25:01,568 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. 2024-11-23T15:25:01,569 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. as already flushing 2024-11-23T15:25:01,569 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. 2024-11-23T15:25:01,569 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:25:01,569 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:25:01,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:25:01,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-11-23T15:25:01,721 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:25:01,721 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-11-23T15:25:01,722 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. 2024-11-23T15:25:01,722 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. as already flushing 2024-11-23T15:25:01,722 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. 2024-11-23T15:25:01,722 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:25:01,722 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:25:01,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:25:01,763 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:01,763 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:01,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34024 deadline: 1732375561761, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:01,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33992 deadline: 1732375561761, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:01,769 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:01,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34008 deadline: 1732375561768, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:01,874 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:25:01,874 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-11-23T15:25:01,875 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. 2024-11-23T15:25:01,875 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. as already flushing 2024-11-23T15:25:01,875 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. 2024-11-23T15:25:01,875 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T15:25:01,875 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:25:01,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:25:01,968 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=164 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/B/b034885a5d8b4d64b0d180515b63f7a6 2024-11-23T15:25:01,978 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/C/8b2d992d51954a8da937bd9f35a8b71b is 50, key is test_row_0/C:col10/1732375501129/Put/seqid=0 2024-11-23T15:25:01,983 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073741989_1165 (size=12151) 2024-11-23T15:25:02,027 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:25:02,027 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-11-23T15:25:02,027 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. 2024-11-23T15:25:02,028 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. as already flushing 2024-11-23T15:25:02,028 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. 2024-11-23T15:25:02,028 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T15:25:02,028 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:25:02,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:25:02,100 INFO [master/6a36843bf905:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-23T15:25:02,100 INFO [master/6a36843bf905:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-11-23T15:25:02,180 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:25:02,180 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-11-23T15:25:02,181 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. 2024-11-23T15:25:02,181 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. as already flushing 2024-11-23T15:25:02,181 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. 2024-11-23T15:25:02,181 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:25:02,181 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:25:02,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T15:25:02,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-11-23T15:25:02,266 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:02,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34024 deadline: 1732375562265, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:02,268 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:02,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33992 deadline: 1732375562267, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:02,272 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:02,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34008 deadline: 1732375562272, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:02,333 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:25:02,333 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-11-23T15:25:02,334 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. 
2024-11-23T15:25:02,334 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. as already flushing 2024-11-23T15:25:02,334 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. 2024-11-23T15:25:02,334 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:25:02,334 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T15:25:02,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T15:25:02,384 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=164 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/C/8b2d992d51954a8da937bd9f35a8b71b 2024-11-23T15:25:02,389 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/A/a00091b266fb4242aea95ed99a1d4b33 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/A/a00091b266fb4242aea95ed99a1d4b33 2024-11-23T15:25:02,393 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/A/a00091b266fb4242aea95ed99a1d4b33, entries=200, sequenceid=164, filesize=38.8 K 2024-11-23T15:25:02,395 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/B/b034885a5d8b4d64b0d180515b63f7a6 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/B/b034885a5d8b4d64b0d180515b63f7a6 2024-11-23T15:25:02,399 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/B/b034885a5d8b4d64b0d180515b63f7a6, entries=150, sequenceid=164, filesize=11.9 K 2024-11-23T15:25:02,400 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/C/8b2d992d51954a8da937bd9f35a8b71b as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/C/8b2d992d51954a8da937bd9f35a8b71b 2024-11-23T15:25:02,405 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/C/8b2d992d51954a8da937bd9f35a8b71b, entries=150, sequenceid=164, filesize=11.9 K 2024-11-23T15:25:02,405 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~174.43 KB/178620, heapSize ~457.73 KB/468720, currentSize=40.25 KB/41220 for f0440d31fa3d850bbc6f1938601d069f in 1276ms, sequenceid=164, compaction requested=true 2024-11-23T15:25:02,406 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for f0440d31fa3d850bbc6f1938601d069f: 2024-11-23T15:25:02,406 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f0440d31fa3d850bbc6f1938601d069f:A, priority=-2147483648, current under compaction store size is 1 2024-11-23T15:25:02,406 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T15:25:02,406 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T15:25:02,406 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f0440d31fa3d850bbc6f1938601d069f:B, priority=-2147483648, current under compaction store size is 2 2024-11-23T15:25:02,406 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T15:25:02,406 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f0440d31fa3d850bbc6f1938601d069f:C, priority=-2147483648, current under compaction store size is 3 2024-11-23T15:25:02,406 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T15:25:02,406 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T15:25:02,407 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 101999 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T15:25:02,407 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HStore(1540): f0440d31fa3d850bbc6f1938601d069f/A is initiating minor compaction (all files) 2024-11-23T15:25:02,407 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36493 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T15:25:02,407 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1540): f0440d31fa3d850bbc6f1938601d069f/B is initiating minor compaction (all files) 2024-11-23T15:25:02,407 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f0440d31fa3d850bbc6f1938601d069f/B in TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. 2024-11-23T15:25:02,407 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f0440d31fa3d850bbc6f1938601d069f/A in TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. 
2024-11-23T15:25:02,407 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/B/71baec5dfa0a4c9489c3bfd1a9395675, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/B/7d96ff53eabf47b9bf5ae7b90e4f8082, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/B/b034885a5d8b4d64b0d180515b63f7a6] into tmpdir=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp, totalSize=35.6 K 2024-11-23T15:25:02,407 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/A/d66fac1d261c4dd9aaccdf5b66bfff4f, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/A/b405604ebba64d579afb9585a76e49b1, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/A/a00091b266fb4242aea95ed99a1d4b33] into tmpdir=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp, totalSize=99.6 K 2024-11-23T15:25:02,407 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. 2024-11-23T15:25:02,407 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. 
files: [hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/A/d66fac1d261c4dd9aaccdf5b66bfff4f, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/A/b405604ebba64d579afb9585a76e49b1, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/A/a00091b266fb4242aea95ed99a1d4b33] 2024-11-23T15:25:02,408 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting 71baec5dfa0a4c9489c3bfd1a9395675, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=122, earliestPutTs=1732375496489 2024-11-23T15:25:02,408 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.Compactor(224): Compacting d66fac1d261c4dd9aaccdf5b66bfff4f, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=122, earliestPutTs=1732375496489 2024-11-23T15:25:02,408 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting 7d96ff53eabf47b9bf5ae7b90e4f8082, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=132, earliestPutTs=1732375498674 2024-11-23T15:25:02,408 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.Compactor(224): Compacting b405604ebba64d579afb9585a76e49b1, keycount=150, bloomtype=ROW, size=30.3 K, encoding=NONE, compression=NONE, seqNum=132, earliestPutTs=1732375498674 2024-11-23T15:25:02,409 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting b034885a5d8b4d64b0d180515b63f7a6, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=164, earliestPutTs=1732375501128 2024-11-23T15:25:02,409 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.Compactor(224): Compacting a00091b266fb4242aea95ed99a1d4b33, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=164, earliestPutTs=1732375500814 2024-11-23T15:25:02,422 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): f0440d31fa3d850bbc6f1938601d069f#B#compaction#138 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T15:25:02,423 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/B/3ef43b17405b4d04ad6855c7326332a4 is 50, key is test_row_0/B:col10/1732375501129/Put/seqid=0 2024-11-23T15:25:02,432 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=f0440d31fa3d850bbc6f1938601d069f] 2024-11-23T15:25:02,439 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202411238dd0af9dca694e04818910b8eb3d95bd_f0440d31fa3d850bbc6f1938601d069f store=[table=TestAcidGuarantees family=A region=f0440d31fa3d850bbc6f1938601d069f] 2024-11-23T15:25:02,439 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073741990_1166 (size=12493) 2024-11-23T15:25:02,441 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202411238dd0af9dca694e04818910b8eb3d95bd_f0440d31fa3d850bbc6f1938601d069f, store=[table=TestAcidGuarantees family=A region=f0440d31fa3d850bbc6f1938601d069f] 2024-11-23T15:25:02,442 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411238dd0af9dca694e04818910b8eb3d95bd_f0440d31fa3d850bbc6f1938601d069f because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=f0440d31fa3d850bbc6f1938601d069f] 2024-11-23T15:25:02,446 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/B/3ef43b17405b4d04ad6855c7326332a4 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/B/3ef43b17405b4d04ad6855c7326332a4 2024-11-23T15:25:02,447 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073741991_1167 (size=4469) 2024-11-23T15:25:02,452 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in f0440d31fa3d850bbc6f1938601d069f/B of f0440d31fa3d850bbc6f1938601d069f into 3ef43b17405b4d04ad6855c7326332a4(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T15:25:02,452 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f0440d31fa3d850bbc6f1938601d069f: 2024-11-23T15:25:02,452 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f., storeName=f0440d31fa3d850bbc6f1938601d069f/B, priority=13, startTime=1732375502406; duration=0sec 2024-11-23T15:25:02,452 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T15:25:02,452 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f0440d31fa3d850bbc6f1938601d069f:B 2024-11-23T15:25:02,452 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T15:25:02,453 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36493 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T15:25:02,454 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1540): f0440d31fa3d850bbc6f1938601d069f/C is initiating minor compaction (all files) 2024-11-23T15:25:02,454 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f0440d31fa3d850bbc6f1938601d069f/C in TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. 2024-11-23T15:25:02,454 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/C/42f0fbf771194f1bb357f6514afaf4a6, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/C/3368444e635840d0ad653085e32d5434, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/C/8b2d992d51954a8da937bd9f35a8b71b] into tmpdir=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp, totalSize=35.6 K 2024-11-23T15:25:02,454 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting 42f0fbf771194f1bb357f6514afaf4a6, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=122, earliestPutTs=1732375496489 2024-11-23T15:25:02,455 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting 3368444e635840d0ad653085e32d5434, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=132, earliestPutTs=1732375498674 2024-11-23T15:25:02,455 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting 8b2d992d51954a8da937bd9f35a8b71b, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=164, earliestPutTs=1732375501128 2024-11-23T15:25:02,462 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
f0440d31fa3d850bbc6f1938601d069f#C#compaction#140 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-23T15:25:02,463 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/C/2252e39be126445f8232aa37e2f8c966 is 50, key is test_row_0/C:col10/1732375501129/Put/seqid=0 2024-11-23T15:25:02,480 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073741992_1168 (size=12493) 2024-11-23T15:25:02,486 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:25:02,487 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/C/2252e39be126445f8232aa37e2f8c966 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/C/2252e39be126445f8232aa37e2f8c966 2024-11-23T15:25:02,487 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-11-23T15:25:02,487 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. 
2024-11-23T15:25:02,487 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2837): Flushing f0440d31fa3d850bbc6f1938601d069f 3/3 column families, dataSize=40.25 KB heapSize=106.22 KB 2024-11-23T15:25:02,488 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f0440d31fa3d850bbc6f1938601d069f, store=A 2024-11-23T15:25:02,488 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:25:02,488 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f0440d31fa3d850bbc6f1938601d069f, store=B 2024-11-23T15:25:02,488 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:25:02,488 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f0440d31fa3d850bbc6f1938601d069f, store=C 2024-11-23T15:25:02,488 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:25:02,494 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in f0440d31fa3d850bbc6f1938601d069f/C of f0440d31fa3d850bbc6f1938601d069f into 2252e39be126445f8232aa37e2f8c966(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T15:25:02,494 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f0440d31fa3d850bbc6f1938601d069f: 2024-11-23T15:25:02,494 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f., storeName=f0440d31fa3d850bbc6f1938601d069f/C, priority=13, startTime=1732375502406; duration=0sec 2024-11-23T15:25:02,494 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T15:25:02,494 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f0440d31fa3d850bbc6f1938601d069f:C 2024-11-23T15:25:02,496 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411235c56f3a77ed544c881eff556493721e6_f0440d31fa3d850bbc6f1938601d069f is 50, key is test_row_0/A:col10/1732375501136/Put/seqid=0 2024-11-23T15:25:02,502 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073741993_1169 (size=12304) 2024-11-23T15:25:02,849 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): f0440d31fa3d850bbc6f1938601d069f#A#compaction#139 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T15:25:02,849 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/A/680681f3d81c45fd844f6c7f7e5129a9 is 175, key is test_row_0/A:col10/1732375501129/Put/seqid=0 2024-11-23T15:25:02,854 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073741994_1170 (size=31447) 2024-11-23T15:25:02,913 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:02,918 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411235c56f3a77ed544c881eff556493721e6_f0440d31fa3d850bbc6f1938601d069f to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411235c56f3a77ed544c881eff556493721e6_f0440d31fa3d850bbc6f1938601d069f 2024-11-23T15:25:02,919 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] mob.DefaultMobStoreFlusher(263): Flush store file: 
hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/A/f8ad57a9d58c44279516c5fd99d9ecd8, store: [table=TestAcidGuarantees family=A region=f0440d31fa3d850bbc6f1938601d069f] 2024-11-23T15:25:02,920 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/A/f8ad57a9d58c44279516c5fd99d9ecd8 is 175, key is test_row_0/A:col10/1732375501136/Put/seqid=0 2024-11-23T15:25:02,924 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073741995_1171 (size=31105) 2024-11-23T15:25:03,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-11-23T15:25:03,261 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/A/680681f3d81c45fd844f6c7f7e5129a9 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/A/680681f3d81c45fd844f6c7f7e5129a9 2024-11-23T15:25:03,268 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in f0440d31fa3d850bbc6f1938601d069f/A of f0440d31fa3d850bbc6f1938601d069f into 680681f3d81c45fd844f6c7f7e5129a9(size=30.7 K), total size for store is 30.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T15:25:03,268 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f0440d31fa3d850bbc6f1938601d069f: 2024-11-23T15:25:03,268 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f., storeName=f0440d31fa3d850bbc6f1938601d069f/A, priority=13, startTime=1732375502406; duration=0sec 2024-11-23T15:25:03,268 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T15:25:03,268 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f0440d31fa3d850bbc6f1938601d069f:A 2024-11-23T15:25:03,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(8581): Flush requested on f0440d31fa3d850bbc6f1938601d069f 2024-11-23T15:25:03,274 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. as already flushing 2024-11-23T15:25:03,301 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:03,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33992 deadline: 1732375563298, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:03,302 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:03,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34024 deadline: 1732375563299, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:03,302 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:03,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34008 deadline: 1732375563301, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:03,325 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=174, memsize=13.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/A/f8ad57a9d58c44279516c5fd99d9ecd8 2024-11-23T15:25:03,333 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/B/9cce9a186c1c42448d3fe5cfb3a8055e is 50, key is test_row_0/B:col10/1732375501136/Put/seqid=0 2024-11-23T15:25:03,337 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073741996_1172 (size=12151) 2024-11-23T15:25:03,404 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:03,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33992 deadline: 1732375563402, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:03,404 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:03,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34024 deadline: 1732375563403, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:03,404 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:03,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34008 deadline: 1732375563403, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:03,609 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:03,609 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:03,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34008 deadline: 1732375563605, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:03,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34024 deadline: 1732375563605, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:03,609 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:03,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33992 deadline: 1732375563606, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:03,738 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=174 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/B/9cce9a186c1c42448d3fe5cfb3a8055e 2024-11-23T15:25:03,747 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/C/19d9fc02a14c4f048b6cd132f103e61c is 50, key is test_row_0/C:col10/1732375501136/Put/seqid=0 2024-11-23T15:25:03,751 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073741997_1173 (size=12151) 2024-11-23T15:25:03,752 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=174 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/C/19d9fc02a14c4f048b6cd132f103e61c 2024-11-23T15:25:03,757 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/A/f8ad57a9d58c44279516c5fd99d9ecd8 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/A/f8ad57a9d58c44279516c5fd99d9ecd8 2024-11-23T15:25:03,761 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/A/f8ad57a9d58c44279516c5fd99d9ecd8, entries=150, sequenceid=174, filesize=30.4 K 2024-11-23T15:25:03,762 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/B/9cce9a186c1c42448d3fe5cfb3a8055e as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/B/9cce9a186c1c42448d3fe5cfb3a8055e 2024-11-23T15:25:03,767 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/B/9cce9a186c1c42448d3fe5cfb3a8055e, entries=150, sequenceid=174, filesize=11.9 K 2024-11-23T15:25:03,768 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/C/19d9fc02a14c4f048b6cd132f103e61c as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/C/19d9fc02a14c4f048b6cd132f103e61c 2024-11-23T15:25:03,772 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/C/19d9fc02a14c4f048b6cd132f103e61c, entries=150, sequenceid=174, filesize=11.9 K 2024-11-23T15:25:03,773 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(3040): Finished flush of dataSize ~40.25 KB/41220, heapSize ~106.17 KB/108720, currentSize=161.02 KB/164880 for f0440d31fa3d850bbc6f1938601d069f in 1286ms, sequenceid=174, compaction requested=false 2024-11-23T15:25:03,773 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2538): Flush status journal for f0440d31fa3d850bbc6f1938601d069f: 2024-11-23T15:25:03,773 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. 
2024-11-23T15:25:03,773 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=52 2024-11-23T15:25:03,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4106): Remote procedure done, pid=52 2024-11-23T15:25:03,776 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=52, resume processing ppid=51 2024-11-23T15:25:03,776 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=52, ppid=51, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.6630 sec 2024-11-23T15:25:03,778 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=51, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=51, table=TestAcidGuarantees in 2.6690 sec 2024-11-23T15:25:03,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(8581): Flush requested on f0440d31fa3d850bbc6f1938601d069f 2024-11-23T15:25:03,913 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing f0440d31fa3d850bbc6f1938601d069f 3/3 column families, dataSize=174.43 KB heapSize=457.78 KB 2024-11-23T15:25:03,914 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f0440d31fa3d850bbc6f1938601d069f, store=A 2024-11-23T15:25:03,914 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:25:03,914 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f0440d31fa3d850bbc6f1938601d069f, store=B 2024-11-23T15:25:03,914 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:25:03,914 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f0440d31fa3d850bbc6f1938601d069f, store=C 2024-11-23T15:25:03,914 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:25:03,929 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241123f797276b563547ffac0e128f712bec36_f0440d31fa3d850bbc6f1938601d069f is 50, key is test_row_0/A:col10/1732375503294/Put/seqid=0 2024-11-23T15:25:03,937 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073741998_1174 (size=12304) 2024-11-23T15:25:03,946 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:03,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34008 deadline: 1732375563944, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:03,946 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:03,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34024 deadline: 1732375563944, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:03,947 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:03,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33992 deadline: 1732375563944, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:04,048 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:04,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34008 deadline: 1732375564047, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:04,048 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:04,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34024 deadline: 1732375564048, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:04,049 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:04,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33992 deadline: 1732375564048, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:04,250 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:04,250 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:04,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34008 deadline: 1732375564249, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:04,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34024 deadline: 1732375564249, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:04,252 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:04,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33992 deadline: 1732375564251, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:04,338 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:04,343 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241123f797276b563547ffac0e128f712bec36_f0440d31fa3d850bbc6f1938601d069f to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123f797276b563547ffac0e128f712bec36_f0440d31fa3d850bbc6f1938601d069f 2024-11-23T15:25:04,344 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/A/afb72efd9f0f4ea2849d6189909a7afd, store: [table=TestAcidGuarantees family=A region=f0440d31fa3d850bbc6f1938601d069f] 2024-11-23T15:25:04,345 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/A/afb72efd9f0f4ea2849d6189909a7afd is 175, key is test_row_0/A:col10/1732375503294/Put/seqid=0 2024-11-23T15:25:04,352 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073741999_1175 (size=31105) 2024-11-23T15:25:04,554 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:04,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34008 deadline: 1732375564551, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:04,554 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:04,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34024 deadline: 1732375564552, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:04,556 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:04,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33992 deadline: 1732375564555, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:04,750 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=206, memsize=60.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/A/afb72efd9f0f4ea2849d6189909a7afd 2024-11-23T15:25:04,759 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/B/b6a4b9d01d9c41ac883d9a634e640e41 is 50, key is test_row_0/B:col10/1732375503294/Put/seqid=0 2024-11-23T15:25:04,763 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742000_1176 (size=12151) 2024-11-23T15:25:05,055 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:05,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34008 deadline: 1732375565055, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:05,057 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:05,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34024 deadline: 1732375565056, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:05,058 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:05,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33992 deadline: 1732375565057, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:05,164 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=60.38 KB at sequenceid=206 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/B/b6a4b9d01d9c41ac883d9a634e640e41 2024-11-23T15:25:05,172 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/C/b1c4cf000d1845d09ff574d66f54595f is 50, key is test_row_0/C:col10/1732375503294/Put/seqid=0 2024-11-23T15:25:05,176 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742001_1177 (size=12151) 2024-11-23T15:25:05,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-11-23T15:25:05,215 INFO [Thread-630 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 51 completed 2024-11-23T15:25:05,216 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-23T15:25:05,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] procedure2.ProcedureExecutor(1098): Stored pid=53, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=53, table=TestAcidGuarantees 2024-11-23T15:25:05,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-11-23T15:25:05,218 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=53, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=53, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-23T15:25:05,219 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=53, 
state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=53, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-23T15:25:05,219 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=54, ppid=53, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-23T15:25:05,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-11-23T15:25:05,371 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:25:05,371 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-11-23T15:25:05,372 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. 2024-11-23T15:25:05,372 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. as already flushing 2024-11-23T15:25:05,372 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. 2024-11-23T15:25:05,372 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:25:05,372 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:25:05,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T15:25:05,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-11-23T15:25:05,524 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:25:05,525 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-11-23T15:25:05,525 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. 2024-11-23T15:25:05,525 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. as already flushing 2024-11-23T15:25:05,525 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. 2024-11-23T15:25:05,525 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:25:05,526 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:25:05,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T15:25:05,577 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=60.38 KB at sequenceid=206 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/C/b1c4cf000d1845d09ff574d66f54595f 2024-11-23T15:25:05,582 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/A/afb72efd9f0f4ea2849d6189909a7afd as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/A/afb72efd9f0f4ea2849d6189909a7afd 2024-11-23T15:25:05,587 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/A/afb72efd9f0f4ea2849d6189909a7afd, entries=150, sequenceid=206, filesize=30.4 K 2024-11-23T15:25:05,587 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/B/b6a4b9d01d9c41ac883d9a634e640e41 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/B/b6a4b9d01d9c41ac883d9a634e640e41 2024-11-23T15:25:05,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,592 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/B/b6a4b9d01d9c41ac883d9a634e640e41, entries=150, sequenceid=206, filesize=11.9 K 2024-11-23T15:25:05,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,593 DEBUG [MemStoreFlusher.0 {}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/C/b1c4cf000d1845d09ff574d66f54595f as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/C/b1c4cf000d1845d09ff574d66f54595f 2024-11-23T15:25:05,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,599 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/C/b1c4cf000d1845d09ff574d66f54595f, entries=150, sequenceid=206, filesize=11.9 K 2024-11-23T15:25:05,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,600 
INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~181.14 KB/185490, heapSize ~475.31 KB/486720, currentSize=20.13 KB/20610 for f0440d31fa3d850bbc6f1938601d069f in 1686ms, sequenceid=206, compaction requested=true 2024-11-23T15:25:05,600 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for f0440d31fa3d850bbc6f1938601d069f: 2024-11-23T15:25:05,600 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f0440d31fa3d850bbc6f1938601d069f:A, priority=-2147483648, current under compaction store size is 1 2024-11-23T15:25:05,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,600 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T15:25:05,600 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T15:25:05,600 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f0440d31fa3d850bbc6f1938601d069f:B, priority=-2147483648, current under compaction store size is 2 2024-11-23T15:25:05,600 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T15:25:05,600 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f0440d31fa3d850bbc6f1938601d069f:C, priority=-2147483648, current under compaction store size is 3 2024-11-23T15:25:05,600 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T15:25:05,600 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T15:25:05,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,601 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36795 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T15:25:05,601 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 93657 starting at candidate #0 
after considering 1 permutations with 1 in ratio 2024-11-23T15:25:05,601 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1540): f0440d31fa3d850bbc6f1938601d069f/B is initiating minor compaction (all files) 2024-11-23T15:25:05,601 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HStore(1540): f0440d31fa3d850bbc6f1938601d069f/A is initiating minor compaction (all files) 2024-11-23T15:25:05,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,601 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f0440d31fa3d850bbc6f1938601d069f/B in TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. 2024-11-23T15:25:05,601 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f0440d31fa3d850bbc6f1938601d069f/A in TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. 2024-11-23T15:25:05,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,601 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/B/3ef43b17405b4d04ad6855c7326332a4, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/B/9cce9a186c1c42448d3fe5cfb3a8055e, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/B/b6a4b9d01d9c41ac883d9a634e640e41] into tmpdir=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp, totalSize=35.9 K 2024-11-23T15:25:05,601 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/A/680681f3d81c45fd844f6c7f7e5129a9, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/A/f8ad57a9d58c44279516c5fd99d9ecd8, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/A/afb72efd9f0f4ea2849d6189909a7afd] into tmpdir=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp, totalSize=91.5 K 2024-11-23T15:25:05,602 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput 
controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. 2024-11-23T15:25:05,602 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. files: [hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/A/680681f3d81c45fd844f6c7f7e5129a9, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/A/f8ad57a9d58c44279516c5fd99d9ecd8, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/A/afb72efd9f0f4ea2849d6189909a7afd] 2024-11-23T15:25:05,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,602 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting 3ef43b17405b4d04ad6855c7326332a4, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=164, earliestPutTs=1732375501128 2024-11-23T15:25:05,602 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 680681f3d81c45fd844f6c7f7e5129a9, keycount=150, bloomtype=ROW, size=30.7 K, encoding=NONE, compression=NONE, seqNum=164, earliestPutTs=1732375501128 2024-11-23T15:25:05,603 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting 9cce9a186c1c42448d3fe5cfb3a8055e, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=174, earliestPutTs=1732375501136 2024-11-23T15:25:05,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,603 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.Compactor(224): Compacting f8ad57a9d58c44279516c5fd99d9ecd8, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=174, earliestPutTs=1732375501136 2024-11-23T15:25:05,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,603 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting b6a4b9d01d9c41ac883d9a634e640e41, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, 
seqNum=206, earliestPutTs=1732375503294 2024-11-23T15:25:05,603 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.Compactor(224): Compacting afb72efd9f0f4ea2849d6189909a7afd, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=206, earliestPutTs=1732375503294 2024-11-23T15:25:05,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,611 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=f0440d31fa3d850bbc6f1938601d069f] 2024-11-23T15:25:05,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,614 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): f0440d31fa3d850bbc6f1938601d069f#B#compaction#148 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-23T15:25:05,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,615 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/B/ed5b4a0cf262438aba81a1850018872e is 50, key is test_row_0/B:col10/1732375503294/Put/seqid=0 2024-11-23T15:25:05,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,617 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer 
created=d41d8cd98f00b204e9800998ecf8427e2024112359d62371f94b418085c28100c920e4e7_f0440d31fa3d850bbc6f1938601d069f store=[table=TestAcidGuarantees family=A region=f0440d31fa3d850bbc6f1938601d069f] 2024-11-23T15:25:05,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,620 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,620 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024112359d62371f94b418085c28100c920e4e7_f0440d31fa3d850bbc6f1938601d069f, store=[table=TestAcidGuarantees family=A region=f0440d31fa3d850bbc6f1938601d069f] 2024-11-23T15:25:05,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,621 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112359d62371f94b418085c28100c920e4e7_f0440d31fa3d850bbc6f1938601d069f because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=f0440d31fa3d850bbc6f1938601d069f] 2024-11-23T15:25:05,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,621 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742002_1178 (size=12595) 2024-11-23T15:25:05,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,623 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,626 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,627 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742003_1179 (size=4469) 2024-11-23T15:25:05,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-23T15:25:05,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-23T15:25:05,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-23T15:25:05,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-23T15:25:05,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-23T15:25:05,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-23T15:25:05,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-23T15:25:05,678 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985
2024-11-23T15:25:05,678 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54
2024-11-23T15:25:05,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-23T15:25:05,678 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.
2024-11-23T15:25:05,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-23T15:25:05,679 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2837): Flushing f0440d31fa3d850bbc6f1938601d069f 3/3 column families, dataSize=20.13 KB heapSize=53.48 KB
2024-11-23T15:25:05,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-23T15:25:05,679 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f0440d31fa3d850bbc6f1938601d069f, store=A
2024-11-23T15:25:05,679 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-23T15:25:05,679 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f0440d31fa3d850bbc6f1938601d069f, store=B
2024-11-23T15:25:05,679 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-23T15:25:05,679 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f0440d31fa3d850bbc6f1938601d069f, store=C
2024-11-23T15:25:05,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-23T15:25:05,679 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-23T15:25:05,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-23T15:25:05,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-23T15:25:05,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-23T15:25:05,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-23T15:25:05,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-23T15:25:05,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-23T15:25:05,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-23T15:25:05,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-23T15:25:05,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-23T15:25:05,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-23T15:25:05,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-23T15:25:05,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-23T15:25:05,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-23T15:25:05,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-23T15:25:05,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-23T15:25:05,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-23T15:25:05,688 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241123168ca47957b6472fa701b06523b52a1b_f0440d31fa3d850bbc6f1938601d069f is 50, key is test_row_1/A:col10/1732375503914/Put/seqid=0
2024-11-23T15:25:05,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-23T15:25:05,689 DEBUG
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,695 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,699 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,702 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,705 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-23T15:25:05,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-23T15:25:05,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-23T15:25:05,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-23T15:25:05,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-23T15:25:05,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-23T15:25:05,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-23T15:25:05,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-23T15:25:05,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-23T15:25:05,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-23T15:25:05,708 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742004_1180 (size=9814)
2024-11-23T15:25:05,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-23T15:25:05,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-23T15:25:05,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-23T15:25:05,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}]
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
[... identical DEBUG entries "storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker" repeated by RpcServer.default.FPBQ.Fifo handlers 0-2 (queue=0, port=33811) between 2024-11-23T15:25:05,710 and 2024-11-23T15:25:05,792 elided ...]
2024-11-23T15:25:05,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}]
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-11-23T15:25:05,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
[the same DEBUG entry, "storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker", repeats continuously from 2024-11-23T15:25:05,845 through 2024-11-23T15:25:05,924 across RpcServer.default.FPBQ.Fifo handlers 0, 1, and 2 on port 33811]
2024-11-23T15:25:05,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:05,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-23T15:25:05,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
[... the same StoreFileTrackerFactory(122) DEBUG entry repeats from RpcServer handlers 0-2 on port 33811 through 2024-11-23T15:25:06,049, interleaved with the region-server compaction entries that follow ...]
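For orientation, the repeated DEBUG entry above comes from a factory resolving a store-file-tracker implementation from configuration and instantiating it reflectively each time a handler asks for one. The sketch below is a minimal, assumed illustration of that configuration-driven factory pattern; the property key, class names, and interface are invented for the example and are not HBase's actual StoreFileTrackerFactory code.

```java
// Minimal sketch of a configuration-driven factory (assumption: a property key
// selects the implementation class). Illustrates the pattern behind the
// "instantiating StoreFileTracker impl <class>" DEBUG line; not HBase source.
import java.lang.reflect.Constructor;
import java.util.Properties;

public class SimpleTrackerFactory {

    /** Stand-in for the real StoreFileTracker contract (assumption). */
    public interface Tracker {
        void load();
    }

    /** Default implementation used when no override is configured (assumption). */
    public static class DefaultTracker implements Tracker {
        @Override
        public void load() {
            System.out.println("loading store file list from the file system");
        }
    }

    /** Resolve the class name from configuration and instantiate it reflectively. */
    public static Tracker create(Properties conf) throws Exception {
        String impl = conf.getProperty("store.file-tracker.impl",          // assumed key
                DefaultTracker.class.getName());
        // Mirrors the logged "instantiating StoreFileTracker impl <class>" step.
        System.out.println("instantiating tracker impl " + impl);
        Class<?> clazz = Class.forName(impl);
        Constructor<?> ctor = clazz.getDeclaredConstructor();
        return (Tracker) ctor.newInstance();
    }

    public static void main(String[] args) throws Exception {
        Tracker tracker = create(new Properties()); // falls back to DefaultTracker
        tracker.load();
    }
}
```

Under this sketch, each create() call would emit one "instantiating tracker impl ..." line, which is consistent with the message appearing once per handler invocation in the log above.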
2024-11-23T15:25:06,029 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/B/ed5b4a0cf262438aba81a1850018872e as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/B/ed5b4a0cf262438aba81a1850018872e
2024-11-23T15:25:06,029 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): f0440d31fa3d850bbc6f1938601d069f#A#compaction#147 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-11-23T15:25:06,030 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/A/ddb8acc43fb141e789883e8abc249025 is 175, key is test_row_0/A:col10/1732375503294/Put/seqid=0
2024-11-23T15:25:06,035 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in f0440d31fa3d850bbc6f1938601d069f/B of f0440d31fa3d850bbc6f1938601d069f into ed5b4a0cf262438aba81a1850018872e(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-11-23T15:25:06,035 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f0440d31fa3d850bbc6f1938601d069f:
2024-11-23T15:25:06,035 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f., storeName=f0440d31fa3d850bbc6f1938601d069f/B, priority=13, startTime=1732375505600; duration=0sec
2024-11-23T15:25:06,036 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0
2024-11-23T15:25:06,036 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f0440d31fa3d850bbc6f1938601d069f:B
2024-11-23T15:25:06,036 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-11-23T15:25:06,037 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36795 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-11-23T15:25:06,037 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1540): f0440d31fa3d850bbc6f1938601d069f/C is initiating minor compaction (all files)
2024-11-23T15:25:06,037 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f0440d31fa3d850bbc6f1938601d069f/C in TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.
2024-11-23T15:25:06,037 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/C/2252e39be126445f8232aa37e2f8c966, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/C/19d9fc02a14c4f048b6cd132f103e61c, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/C/b1c4cf000d1845d09ff574d66f54595f] into tmpdir=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp, totalSize=35.9 K
2024-11-23T15:25:06,038 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting 2252e39be126445f8232aa37e2f8c966, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=164, earliestPutTs=1732375501128
2024-11-23T15:25:06,038 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting 19d9fc02a14c4f048b6cd132f103e61c, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=174, earliestPutTs=1732375501136
2024-11-23T15:25:06,039 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting b1c4cf000d1845d09ff574d66f54595f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=206, earliestPutTs=1732375503294
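The SortedCompactionPolicy and ExploringCompactionPolicy entries above record the selection that fed this compaction: 3 eligible files, a single qualifying permutation, 36795 bytes selected. As a rough, assumed illustration of the size-ratio idea behind that kind of selection (not the actual HBase policy code), the sketch below accepts a contiguous window of store files only when no file is larger than ratio times the combined size of the others, and prefers the widest qualifying window.

```java
// Simplified illustration of a size-ratio compaction selection rule.
// Assumption: "file <= ratio * sum(other files in window)" formulation;
// class name, parameters, and tie-breaking are invented for the example.
import java.util.ArrayList;
import java.util.List;

public class RatioSelectionSketch {

    /** Returns true if every file in the window satisfies the size-ratio rule. */
    static boolean inRatio(List<Long> window, double ratio) {
        long total = window.stream().mapToLong(Long::longValue).sum();
        for (long size : window) {
            if (size > ratio * (total - size)) {
                return false;
            }
        }
        return true;
    }

    /** Pick the qualifying contiguous window with the most files (ties: smaller total size). */
    static List<Long> select(List<Long> sizes, int minFiles, int maxFiles, double ratio) {
        List<Long> best = new ArrayList<>();
        long bestTotal = Long.MAX_VALUE;
        for (int start = 0; start < sizes.size(); start++) {
            for (int end = start + minFiles; end <= Math.min(sizes.size(), start + maxFiles); end++) {
                List<Long> window = sizes.subList(start, end);
                long total = window.stream().mapToLong(Long::longValue).sum();
                if (inRatio(window, ratio)
                        && (window.size() > best.size()
                            || (window.size() == best.size() && total < bestTotal))) {
                    best = new ArrayList<>(window);
                    bestTotal = total;
                }
            }
        }
        return best;
    }

    public static void main(String[] args) {
        // Three store files roughly matching the sizes logged above (~12 K each, 36795 bytes total).
        List<Long> sizes = List.of(12_500L, 12_200L, 12_095L);
        System.out.println(select(sizes, 2, 10, 1.2)); // selects all three files
    }
}
```

With three roughly equal ~12 K files, every window passes the ratio check, so the widest window (all three files, 36795 bytes) wins, consistent with the "3 files of size 36795" entry above.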
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,042 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742005_1181 (size=31549) 2024-11-23T15:25:06,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,050 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): f0440d31fa3d850bbc6f1938601d069f#C#compaction#150 average 
throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T15:25:06,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,050 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/C/2cae9a492b6d4ee6a1e53dee31a14966 is 50, key is test_row_0/C:col10/1732375503294/Put/seqid=0 2024-11-23T15:25:06,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
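[editor's note] The throughput line that completes above reports the C-family compaction writing at an average of 6.55 MB/second with zero sleeps against a 50.00 MB/second total limit. Below is a minimal sketch of a sleep-based write-rate limiter in that spirit; the class name, window bookkeeping and sleep strategy are assumptions for illustration and are not HBase's PressureAwareThroughputController.

/**
 * Illustrative sketch only: throttle a writer to a byte-per-second budget by
 * sleeping when it gets ahead of the configured rate.
 */
public class SimpleThroughputLimiter {

    private final double limitBytesPerSec;
    private final long windowStartNanos = System.nanoTime();
    private long bytesInWindow = 0;

    public SimpleThroughputLimiter(double limitBytesPerSec) {
        this.limitBytesPerSec = limitBytesPerSec;
    }

    /** Record bytes written; sleep just long enough to stay under the limit. */
    public synchronized void control(long bytesWritten) throws InterruptedException {
        bytesInWindow += bytesWritten;
        double elapsedSec = (System.nanoTime() - windowStartNanos) / 1e9;
        double minElapsedSec = bytesInWindow / limitBytesPerSec;
        if (minElapsedSec > elapsedSec) {
            long sleepMs = (long) ((minElapsedSec - elapsedSec) * 1000);
            Thread.sleep(sleepMs); // slow the writer down to the configured rate
        }
    }

    public static void main(String[] args) throws InterruptedException {
        // 50 MB/s budget, mirroring the "total limit is 50.00 MB/second" log line.
        SimpleThroughputLimiter limiter = new SimpleThroughputLimiter(50.0 * 1024 * 1024);
        byte[] block = new byte[64 * 1024];
        for (int i = 0; i < 100; i++) {
            // ... write `block` to the compaction output here (omitted) ...
            limiter.control(block.length);
        }
        System.out.println("wrote " + (100 * block.length) + " bytes under the limit");
    }
}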
2024-11-23T15:25:06,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,056 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742006_1182 (size=12595) 2024-11-23T15:25:06,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 
{}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,064 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/C/2cae9a492b6d4ee6a1e53dee31a14966 as 
hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/C/2cae9a492b6d4ee6a1e53dee31a14966 2024-11-23T15:25:06,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,068 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,073 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in f0440d31fa3d850bbc6f1938601d069f/C of f0440d31fa3d850bbc6f1938601d069f into 2cae9a492b6d4ee6a1e53dee31a14966(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
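[editor's note] The journal above records the three C-family files being rewritten into 2cae9a492b6d4ee6a1e53dee31a14966 (12.3 K) and committed into the store. For context, this is a minimal client-side sketch, assuming the stock HBase 2.x Admin API, of requesting such a compaction for one family and polling until the servers report it finished; the connection settings and polling interval are illustrative.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.CompactionState;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.util.Bytes;

/**
 * Sketch: ask for a compaction of one family of TestAcidGuarantees and wait for it,
 * which is what the "Starting compaction ... Completed compaction" journal above
 * reflects on the server side. Uses whatever the local hbase-site.xml provides.
 */
public class RequestCompactionSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        TableName table = TableName.valueOf("TestAcidGuarantees");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            // Request a compaction of family C, like the one logged above.
            admin.compact(table, Bytes.toBytes("C"));
            // Poll until the region servers report no compaction in progress.
            while (admin.getCompactionState(table) != CompactionState.NONE) {
                Thread.sleep(1000);
            }
            System.out.println("compaction of " + table + "/C finished");
        }
    }
}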
2024-11-23T15:25:06,073 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f0440d31fa3d850bbc6f1938601d069f: 2024-11-23T15:25:06,073 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f., storeName=f0440d31fa3d850bbc6f1938601d069f/C, priority=13, startTime=1732375505600; duration=0sec 2024-11-23T15:25:06,073 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T15:25:06,073 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f0440d31fa3d850bbc6f1938601d069f:C 2024-11-23T15:25:06,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(8581): Flush requested on f0440d31fa3d850bbc6f1938601d069f 2024-11-23T15:25:06,092 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. 
as already flushing 2024-11-23T15:25:06,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,109 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating 
StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:06,114 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241123168ca47957b6472fa701b06523b52a1b_f0440d31fa3d850bbc6f1938601d069f to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123168ca47957b6472fa701b06523b52a1b_f0440d31fa3d850bbc6f1938601d069f 2024-11-23T15:25:06,116 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/A/9e760a991c3e4b7ba22054ff0c45da93, store: [table=TestAcidGuarantees family=A region=f0440d31fa3d850bbc6f1938601d069f] 2024-11-23T15:25:06,117 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/A/9e760a991c3e4b7ba22054ff0c45da93 is 175, key is test_row_1/A:col10/1732375503914/Put/seqid=0 2024-11-23T15:25:06,130 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742007_1183 (size=22461) 2024-11-23T15:25:06,131 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=212, memsize=6.7 K, hasBloomFilter=true, into tmp file 
hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/A/9e760a991c3e4b7ba22054ff0c45da93 2024-11-23T15:25:06,149 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/B/877636693685408589e23d71994e40ec is 50, key is test_row_1/B:col10/1732375503914/Put/seqid=0 2024-11-23T15:25:06,161 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742008_1184 (size=9757) 2024-11-23T15:25:06,174 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:06,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33992 deadline: 1732375566172, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:06,174 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:06,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34024 deadline: 1732375566172, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:06,174 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:06,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34008 deadline: 1732375566172, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:06,275 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:06,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33992 deadline: 1732375566275, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:06,275 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:06,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34024 deadline: 1732375566275, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:06,276 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:06,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34008 deadline: 1732375566275, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:06,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-11-23T15:25:06,448 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/A/ddb8acc43fb141e789883e8abc249025 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/A/ddb8acc43fb141e789883e8abc249025 2024-11-23T15:25:06,454 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in f0440d31fa3d850bbc6f1938601d069f/A of f0440d31fa3d850bbc6f1938601d069f into ddb8acc43fb141e789883e8abc249025(size=30.8 K), total size for store is 30.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T15:25:06,454 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f0440d31fa3d850bbc6f1938601d069f: 2024-11-23T15:25:06,454 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f., storeName=f0440d31fa3d850bbc6f1938601d069f/A, priority=13, startTime=1732375505600; duration=0sec 2024-11-23T15:25:06,454 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T15:25:06,454 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f0440d31fa3d850bbc6f1938601d069f:A 2024-11-23T15:25:06,477 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:06,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33992 deadline: 1732375566477, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:06,477 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:06,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34024 deadline: 1732375566477, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:06,478 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:06,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34008 deadline: 1732375566477, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:06,562 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.71 KB at sequenceid=212 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/B/877636693685408589e23d71994e40ec 2024-11-23T15:25:06,571 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/C/e16cc2f6fd2d4fb8839506acde182261 is 50, key is test_row_1/C:col10/1732375503914/Put/seqid=0 2024-11-23T15:25:06,575 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742009_1185 (size=9757) 2024-11-23T15:25:06,779 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:06,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33992 deadline: 1732375566778, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:06,779 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:06,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34008 deadline: 1732375566779, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:06,782 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:06,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34024 deadline: 1732375566781, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:06,976 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.71 KB at sequenceid=212 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/C/e16cc2f6fd2d4fb8839506acde182261 2024-11-23T15:25:06,982 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/A/9e760a991c3e4b7ba22054ff0c45da93 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/A/9e760a991c3e4b7ba22054ff0c45da93 2024-11-23T15:25:06,986 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/A/9e760a991c3e4b7ba22054ff0c45da93, entries=100, sequenceid=212, filesize=21.9 K 2024-11-23T15:25:06,987 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/B/877636693685408589e23d71994e40ec as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/B/877636693685408589e23d71994e40ec 2024-11-23T15:25:06,992 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/B/877636693685408589e23d71994e40ec, entries=100, sequenceid=212, filesize=9.5 K 2024-11-23T15:25:06,992 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/C/e16cc2f6fd2d4fb8839506acde182261 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/C/e16cc2f6fd2d4fb8839506acde182261 2024-11-23T15:25:06,997 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/C/e16cc2f6fd2d4fb8839506acde182261, entries=100, sequenceid=212, filesize=9.5 K 2024-11-23T15:25:06,998 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(3040): Finished flush of dataSize ~20.13 KB/20610, heapSize ~53.44 KB/54720, currentSize=194.56 KB/199230 for f0440d31fa3d850bbc6f1938601d069f in 1320ms, sequenceid=212, compaction requested=false 2024-11-23T15:25:06,998 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2538): Flush status journal for f0440d31fa3d850bbc6f1938601d069f: 2024-11-23T15:25:06,998 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. 
2024-11-23T15:25:06,998 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=54 2024-11-23T15:25:06,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4106): Remote procedure done, pid=54 2024-11-23T15:25:07,001 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=54, resume processing ppid=53 2024-11-23T15:25:07,001 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=54, ppid=53, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.7800 sec 2024-11-23T15:25:07,002 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=53, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=53, table=TestAcidGuarantees in 1.7850 sec 2024-11-23T15:25:07,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(8581): Flush requested on f0440d31fa3d850bbc6f1938601d069f 2024-11-23T15:25:07,283 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing f0440d31fa3d850bbc6f1938601d069f 3/3 column families, dataSize=201.27 KB heapSize=528.09 KB 2024-11-23T15:25:07,283 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f0440d31fa3d850bbc6f1938601d069f, store=A 2024-11-23T15:25:07,283 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:25:07,283 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f0440d31fa3d850bbc6f1938601d069f, store=B 2024-11-23T15:25:07,283 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:25:07,283 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f0440d31fa3d850bbc6f1938601d069f, store=C 2024-11-23T15:25:07,283 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:25:07,285 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:07,285 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:07,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34024 deadline: 1732375567283, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:07,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33992 deadline: 1732375567283, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:07,285 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:07,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34008 deadline: 1732375567284, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:07,294 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241123631a26c9998e4d3d97df0b5ec3ab5c9c_f0440d31fa3d850bbc6f1938601d069f is 50, key is test_row_0/A:col10/1732375507281/Put/seqid=0 2024-11-23T15:25:07,299 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742010_1186 (size=14794) 2024-11-23T15:25:07,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-11-23T15:25:07,323 INFO [Thread-630 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 53 completed 2024-11-23T15:25:07,325 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-23T15:25:07,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] procedure2.ProcedureExecutor(1098): Stored pid=55, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=55, table=TestAcidGuarantees 2024-11-23T15:25:07,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-11-23T15:25:07,326 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=55, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=55, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-23T15:25:07,327 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=55, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=55, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-23T15:25:07,327 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=56, ppid=55, state=RUNNABLE; 
org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-23T15:25:07,387 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:07,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33992 deadline: 1732375567386, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:07,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-11-23T15:25:07,478 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:25:07,479 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-23T15:25:07,479 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. 2024-11-23T15:25:07,479 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. as already flushing 2024-11-23T15:25:07,479 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. 
2024-11-23T15:25:07,479 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:25:07,480 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:25:07,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:25:07,589 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:07,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33992 deadline: 1732375567588, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:07,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-11-23T15:25:07,632 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:25:07,632 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-23T15:25:07,633 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. 2024-11-23T15:25:07,633 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. as already flushing 2024-11-23T15:25:07,633 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. 2024-11-23T15:25:07,633 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T15:25:07,633 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:25:07,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:25:07,700 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:07,705 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241123631a26c9998e4d3d97df0b5ec3ab5c9c_f0440d31fa3d850bbc6f1938601d069f to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123631a26c9998e4d3d97df0b5ec3ab5c9c_f0440d31fa3d850bbc6f1938601d069f 2024-11-23T15:25:07,706 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/A/774d9c7982214e60bbd09eb5d1f7bda8, store: [table=TestAcidGuarantees family=A region=f0440d31fa3d850bbc6f1938601d069f] 2024-11-23T15:25:07,706 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/A/774d9c7982214e60bbd09eb5d1f7bda8 is 175, key is test_row_0/A:col10/1732375507281/Put/seqid=0 2024-11-23T15:25:07,711 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742011_1187 (size=39749) 2024-11-23T15:25:07,712 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=248, memsize=67.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/A/774d9c7982214e60bbd09eb5d1f7bda8 2024-11-23T15:25:07,722 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/B/a897bfcc1f414244be667bedb5cedb63 is 50, key is test_row_0/B:col10/1732375507281/Put/seqid=0 2024-11-23T15:25:07,725 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742012_1188 (size=12151) 2024-11-23T15:25:07,785 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:25:07,786 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-23T15:25:07,786 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. 
2024-11-23T15:25:07,786 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. as already flushing 2024-11-23T15:25:07,786 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. 2024-11-23T15:25:07,786 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:25:07,786 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T15:25:07,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:25:07,893 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:07,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33992 deadline: 1732375567892, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:07,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-11-23T15:25:07,938 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:25:07,939 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-23T15:25:07,939 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. 2024-11-23T15:25:07,939 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. as already flushing 2024-11-23T15:25:07,939 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. 2024-11-23T15:25:07,939 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
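The "Over memstore limit=512.0 K" figure is the region's blocking memstore size, which HRegion.checkResources derives from hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier. A sketch of one configuration that yields that limit (128 KB x 4 = 512 KB); the key names are the standard HBase ones, but the exact values used by this test are an assumption.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class SmallMemStoreConf {
  // 128 KB flush size with the default block multiplier of 4 gives the 512 K
  // blocking limit reported above; values are illustrative, not the test's settings.
  public static Configuration create() {
    Configuration conf = HBaseConfiguration.create();
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
    return conf;
  }
}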
2024-11-23T15:25:07,939 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:25:07,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:25:08,091 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:25:08,092 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-23T15:25:08,092 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. 2024-11-23T15:25:08,092 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. as already flushing 2024-11-23T15:25:08,092 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. 2024-11-23T15:25:08,092 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:25:08,093 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:25:08,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
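The repeating pid=56 stack traces above are the master re-dispatching a region flush: each FlushRegionCallable finds the region "already flushing" (the MemStoreFlusher is still writing the A/B/C store files) and reports an IOException so the master will retry later. If the flush was requested through the client Admin API, the call would look roughly like the sketch below; that attribution is an assumption, since the log only shows the resulting procedures.

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;

public class FlushRequestSketch {
  // Request a flush of every region of the table; in this build the master drives
  // it via flush procedures (pid=55/56 above) and keeps polling until done.
  public static void flushTable(Connection conn) throws IOException {
    try (Admin admin = conn.getAdmin()) {
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}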
2024-11-23T15:25:08,126 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=67.09 KB at sequenceid=248 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/B/a897bfcc1f414244be667bedb5cedb63 2024-11-23T15:25:08,135 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/C/298722fc71dd48538fee6f6c76b272d1 is 50, key is test_row_0/C:col10/1732375507281/Put/seqid=0 2024-11-23T15:25:08,139 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742013_1189 (size=12151) 2024-11-23T15:25:08,140 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=67.09 KB at sequenceid=248 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/C/298722fc71dd48538fee6f6c76b272d1 2024-11-23T15:25:08,150 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/A/774d9c7982214e60bbd09eb5d1f7bda8 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/A/774d9c7982214e60bbd09eb5d1f7bda8 2024-11-23T15:25:08,154 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/A/774d9c7982214e60bbd09eb5d1f7bda8, entries=200, sequenceid=248, filesize=38.8 K 2024-11-23T15:25:08,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,155 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/B/a897bfcc1f414244be667bedb5cedb63 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/B/a897bfcc1f414244be667bedb5cedb63 2024-11-23T15:25:08,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-23T15:25:08,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,160 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/B/a897bfcc1f414244be667bedb5cedb63, entries=150, sequenceid=248, filesize=11.9 K 2024-11-23T15:25:08,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,161 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/C/298722fc71dd48538fee6f6c76b272d1 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/C/298722fc71dd48538fee6f6c76b272d1 2024-11-23T15:25:08,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,173 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/C/298722fc71dd48538fee6f6c76b272d1, entries=150, sequenceid=248, filesize=11.9 K 2024-11-23T15:25:08,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,174 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~201.27 KB/206100, heapSize ~528.05 KB/540720, currentSize=0 B/0 for f0440d31fa3d850bbc6f1938601d069f in 891ms, sequenceid=248, compaction requested=true 2024-11-23T15:25:08,174 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for f0440d31fa3d850bbc6f1938601d069f: 2024-11-23T15:25:08,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,175 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f0440d31fa3d850bbc6f1938601d069f:A, priority=-2147483648, current under compaction store size is 1 2024-11-23T15:25:08,175 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T15:25:08,175 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f0440d31fa3d850bbc6f1938601d069f:B, priority=-2147483648, current under compaction store size is 2 2024-11-23T15:25:08,175 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T15:25:08,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,175 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T15:25:08,175 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f0440d31fa3d850bbc6f1938601d069f:C, priority=-2147483648, current under compaction store size is 3 2024-11-23T15:25:08,175 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T15:25:08,175 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T15:25:08,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,176 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 93759 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T15:25:08,176 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HStore(1540): f0440d31fa3d850bbc6f1938601d069f/A is initiating minor compaction (all files) 2024-11-23T15:25:08,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,176 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 34503 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T15:25:08,176 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f0440d31fa3d850bbc6f1938601d069f/A in TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. 
2024-11-23T15:25:08,176 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1540): f0440d31fa3d850bbc6f1938601d069f/B is initiating minor compaction (all files) 2024-11-23T15:25:08,176 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/A/ddb8acc43fb141e789883e8abc249025, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/A/9e760a991c3e4b7ba22054ff0c45da93, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/A/774d9c7982214e60bbd09eb5d1f7bda8] into tmpdir=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp, totalSize=91.6 K 2024-11-23T15:25:08,176 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f0440d31fa3d850bbc6f1938601d069f/B in TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. 2024-11-23T15:25:08,176 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. 2024-11-23T15:25:08,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,176 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. 
files: [hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/A/ddb8acc43fb141e789883e8abc249025, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/A/9e760a991c3e4b7ba22054ff0c45da93, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/A/774d9c7982214e60bbd09eb5d1f7bda8] 2024-11-23T15:25:08,176 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/B/ed5b4a0cf262438aba81a1850018872e, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/B/877636693685408589e23d71994e40ec, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/B/a897bfcc1f414244be667bedb5cedb63] into tmpdir=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp, totalSize=33.7 K 2024-11-23T15:25:08,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,177 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.Compactor(224): Compacting ddb8acc43fb141e789883e8abc249025, keycount=150, bloomtype=ROW, size=30.8 K, encoding=NONE, compression=NONE, seqNum=206, earliestPutTs=1732375503294 2024-11-23T15:25:08,177 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting ed5b4a0cf262438aba81a1850018872e, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=206, earliestPutTs=1732375503294 2024-11-23T15:25:08,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,178 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting 877636693685408589e23d71994e40ec, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=212, earliestPutTs=1732375503914 2024-11-23T15:25:08,178 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9e760a991c3e4b7ba22054ff0c45da93, keycount=100, bloomtype=ROW, size=21.9 K, encoding=NONE, compression=NONE, seqNum=212, earliestPutTs=1732375503914 2024-11-23T15:25:08,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,178 DEBUG 
[RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 774d9c7982214e60bbd09eb5d1f7bda8, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=248, earliestPutTs=1732375506133 2024-11-23T15:25:08,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,178 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting a897bfcc1f414244be667bedb5cedb63, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=248, earliestPutTs=1732375506133 2024-11-23T15:25:08,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
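With three store files now in each of A, B and C, the flusher queues compactions for all three stores; the ExploringCompactionPolicy selects all three files per store for a minor compaction (91.6 K total for A, 33.7 K for B), throttled at 50 MB/s by the throughput controller. The same work can also be requested explicitly through the Admin API; a minimal sketch, with only the table name taken from the log:

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;

public class CompactionRequestSketch {
  // Queue compactions for every store of the table; the compaction policy still
  // decides which files to rewrite. majorCompact(...) would rewrite all files.
  public static void compactTable(Connection conn) throws IOException {
    try (Admin admin = conn.getAdmin()) {
      admin.compact(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}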
2024-11-23T15:25:08,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-23T15:25:08,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,188 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): f0440d31fa3d850bbc6f1938601d069f#B#compaction#156 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T15:25:08,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,189 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/B/26fb6472a4d84c80ab38b6b52faca71d is 50, key is test_row_0/B:col10/1732375507281/Put/seqid=0 2024-11-23T15:25:08,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,190 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,193 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,197 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,201 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,201 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=f0440d31fa3d850bbc6f1938601d069f] 2024-11-23T15:25:08,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,206 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742014_1190 (size=12697) 2024-11-23T15:25:08,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-23T15:25:08,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,213 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241123151670d531f045db94f6441fb207f279_f0440d31fa3d850bbc6f1938601d069f store=[table=TestAcidGuarantees family=A region=f0440d31fa3d850bbc6f1938601d069f] 2024-11-23T15:25:08,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,215 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241123151670d531f045db94f6441fb207f279_f0440d31fa3d850bbc6f1938601d069f, store=[table=TestAcidGuarantees family=A region=f0440d31fa3d850bbc6f1938601d069f] 2024-11-23T15:25:08,215 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241123151670d531f045db94f6441fb207f279_f0440d31fa3d850bbc6f1938601d069f because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=f0440d31fa3d850bbc6f1938601d069f] 2024-11-23T15:25:08,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,217 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/B/26fb6472a4d84c80ab38b6b52faca71d as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/B/26fb6472a4d84c80ab38b6b52faca71d 2024-11-23T15:25:08,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating 
StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,224 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in f0440d31fa3d850bbc6f1938601d069f/B of f0440d31fa3d850bbc6f1938601d069f into 26fb6472a4d84c80ab38b6b52faca71d(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T15:25:08,224 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f0440d31fa3d850bbc6f1938601d069f: 2024-11-23T15:25:08,224 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f., storeName=f0440d31fa3d850bbc6f1938601d069f/B, priority=13, startTime=1732375508175; duration=0sec 2024-11-23T15:25:08,224 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T15:25:08,224 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f0440d31fa3d850bbc6f1938601d069f:B 2024-11-23T15:25:08,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,224 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T15:25:08,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,226 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 34503 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T15:25:08,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,226 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1540): f0440d31fa3d850bbc6f1938601d069f/C is initiating minor compaction (all files) 
2024-11-23T15:25:08,226 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f0440d31fa3d850bbc6f1938601d069f/C in TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. 2024-11-23T15:25:08,226 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/C/2cae9a492b6d4ee6a1e53dee31a14966, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/C/e16cc2f6fd2d4fb8839506acde182261, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/C/298722fc71dd48538fee6f6c76b272d1] into tmpdir=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp, totalSize=33.7 K 2024-11-23T15:25:08,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,226 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742015_1191 (size=4469) 2024-11-23T15:25:08,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,227 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting 2cae9a492b6d4ee6a1e53dee31a14966, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=206, earliestPutTs=1732375503294 2024-11-23T15:25:08,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,227 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting e16cc2f6fd2d4fb8839506acde182261, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=212, earliestPutTs=1732375503914 2024-11-23T15:25:08,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,228 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): f0440d31fa3d850bbc6f1938601d069f#A#compaction#157 average throughput is 0.90 MB/second, slept 0 time(s) and 
total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T15:25:08,228 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting 298722fc71dd48538fee6f6c76b272d1, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=248, earliestPutTs=1732375506133 2024-11-23T15:25:08,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,229 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/A/0df08b3cabaa4fd7b1c19018d6c4033c is 175, key is test_row_0/A:col10/1732375507281/Put/seqid=0 2024-11-23T15:25:08,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,232 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,236 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,239 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,241 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742016_1192 (size=31651) 2024-11-23T15:25:08,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,245 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:25:08,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,245 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-23T15:25:08,246 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on 
TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. 2024-11-23T15:25:08,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,246 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2538): Flush status journal for f0440d31fa3d850bbc6f1938601d069f: 2024-11-23T15:25:08,246 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. 2024-11-23T15:25:08,246 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=56 2024-11-23T15:25:08,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4106): Remote procedure done, pid=56 2024-11-23T15:25:08,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,248 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/A/0df08b3cabaa4fd7b1c19018d6c4033c as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/A/0df08b3cabaa4fd7b1c19018d6c4033c 2024-11-23T15:25:08,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,249 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=56, resume processing ppid=55 2024-11-23T15:25:08,249 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=56, ppid=55, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 920 msec 2024-11-23T15:25:08,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,251 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=55, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=55, table=TestAcidGuarantees in 924 msec 2024-11-23T15:25:08,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,254 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): f0440d31fa3d850bbc6f1938601d069f#C#compaction#158 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T15:25:08,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,254 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/C/cfab3da7ee054e86af256cef828af6a9 is 50, key is test_row_0/C:col10/1732375507281/Put/seqid=0 2024-11-23T15:25:08,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,258 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,260 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in f0440d31fa3d850bbc6f1938601d069f/A of f0440d31fa3d850bbc6f1938601d069f into 0df08b3cabaa4fd7b1c19018d6c4033c(size=30.9 K), total size for store is 30.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T15:25:08,260 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f0440d31fa3d850bbc6f1938601d069f: 2024-11-23T15:25:08,260 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f., storeName=f0440d31fa3d850bbc6f1938601d069f/A, priority=13, startTime=1732375508174; duration=0sec 2024-11-23T15:25:08,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,260 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T15:25:08,260 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f0440d31fa3d850bbc6f1938601d069f:A 2024-11-23T15:25:08,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,261 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,262 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742017_1193 (size=12697) 2024-11-23T15:25:08,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-23T15:25:08,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}]
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(8581): Flush requested on f0440d31fa3d850bbc6f1938601d069f 2024-11-23T15:25:08,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,396 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing f0440d31fa3d850bbc6f1938601d069f 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-23T15:25:08,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-23T15:25:08,396 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f0440d31fa3d850bbc6f1938601d069f, store=A 2024-11-23T15:25:08,396 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:25:08,396 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f0440d31fa3d850bbc6f1938601d069f, store=B 2024-11-23T15:25:08,396 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:25:08,396 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f0440d31fa3d850bbc6f1938601d069f, store=C 2024-11-23T15:25:08,396 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:25:08,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,405 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411232e75c168c91f4bd1a279a0e51db8d633_f0440d31fa3d850bbc6f1938601d069f is 50, key is test_row_0/A:col10/1732375508394/Put/seqid=0 2024-11-23T15:25:08,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,413 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742018_1194 (size=20074) 2024-11-23T15:25:08,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,415 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker [identical DEBUG entries from RpcServer.default.FPBQ.Fifo.handler=1 (queue=0, port=33811), repeated from 2024-11-23T15:25:08,416 through 15:25:08,422, elided] 2024-11-23T15:25:08,423 DEBUG
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-11-23T15:25:08,429 INFO [Thread-630 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 55 completed 2024-11-23T15:25:08,430 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-23T15:25:08,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] procedure2.ProcedureExecutor(1098): Stored pid=57, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=57, table=TestAcidGuarantees 2024-11-23T15:25:08,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-11-23T15:25:08,432 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=57, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=57, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-23T15:25:08,432 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=57, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=57, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-23T15:25:08,432 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=58, ppid=57, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-23T15:25:08,456 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region 
is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:08,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34024 deadline: 1732375568454, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:08,458 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:08,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34008 deadline: 1732375568454, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:08,458 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:08,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33992 deadline: 1732375568456, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:08,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-11-23T15:25:08,558 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:08,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34024 deadline: 1732375568557, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:08,559 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:08,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34008 deadline: 1732375568559, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:08,560 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:08,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33992 deadline: 1732375568559, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:08,583 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:25:08,584 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-11-23T15:25:08,584 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. 2024-11-23T15:25:08,584 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. as already flushing 2024-11-23T15:25:08,584 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. 2024-11-23T15:25:08,584 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:25:08,584 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:25:08,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:25:08,669 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/C/cfab3da7ee054e86af256cef828af6a9 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/C/cfab3da7ee054e86af256cef828af6a9 2024-11-23T15:25:08,674 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in f0440d31fa3d850bbc6f1938601d069f/C of f0440d31fa3d850bbc6f1938601d069f into cfab3da7ee054e86af256cef828af6a9(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T15:25:08,674 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f0440d31fa3d850bbc6f1938601d069f: 2024-11-23T15:25:08,674 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f., storeName=f0440d31fa3d850bbc6f1938601d069f/C, priority=13, startTime=1732375508175; duration=0sec 2024-11-23T15:25:08,674 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T15:25:08,674 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f0440d31fa3d850bbc6f1938601d069f:C 2024-11-23T15:25:08,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-11-23T15:25:08,736 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:25:08,736 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-11-23T15:25:08,737 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. 2024-11-23T15:25:08,737 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. as already flushing 2024-11-23T15:25:08,737 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. 2024-11-23T15:25:08,737 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T15:25:08,737 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:25:08,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:25:08,762 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:08,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34024 deadline: 1732375568761, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:08,762 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:08,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34008 deadline: 1732375568761, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:08,763 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:08,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33992 deadline: 1732375568761, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:08,814 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:08,818 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411232e75c168c91f4bd1a279a0e51db8d633_f0440d31fa3d850bbc6f1938601d069f to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411232e75c168c91f4bd1a279a0e51db8d633_f0440d31fa3d850bbc6f1938601d069f 2024-11-23T15:25:08,819 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/A/4c610dfe737e4c3d9d6783b4434bd449, store: [table=TestAcidGuarantees family=A region=f0440d31fa3d850bbc6f1938601d069f] 2024-11-23T15:25:08,820 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/A/4c610dfe737e4c3d9d6783b4434bd449 is 175, key is test_row_0/A:col10/1732375508394/Put/seqid=0 2024-11-23T15:25:08,828 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742019_1195 (size=57333) 2024-11-23T15:25:08,889 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:25:08,889 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-11-23T15:25:08,890 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. 2024-11-23T15:25:08,890 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. 
as already flushing 2024-11-23T15:25:08,890 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. 2024-11-23T15:25:08,890 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:25:08,890 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:25:08,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:25:09,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-11-23T15:25:09,042 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:25:09,043 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-11-23T15:25:09,043 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. 2024-11-23T15:25:09,043 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. as already flushing 2024-11-23T15:25:09,043 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. 2024-11-23T15:25:09,043 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:25:09,043 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:25:09,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:25:09,065 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:09,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34024 deadline: 1732375569064, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:09,065 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:09,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34008 deadline: 1732375569064, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:09,066 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:09,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33992 deadline: 1732375569065, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:09,164 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:09,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33976 deadline: 1732375569163, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:09,165 DEBUG [Thread-628 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=8, retries=16, started=18185 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f., hostname=6a36843bf905,33811,1732375456985, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-23T15:25:09,191 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:09,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33998 deadline: 1732375569189, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:09,192 DEBUG [Thread-624 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=8, retries=16, started=18212 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f., hostname=6a36843bf905,33811,1732375456985, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-23T15:25:09,195 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:25:09,196 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33811 
{}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-11-23T15:25:09,196 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. 2024-11-23T15:25:09,196 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. as already flushing 2024-11-23T15:25:09,196 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. 2024-11-23T15:25:09,196 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:25:09,196 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T15:25:09,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T15:25:09,229 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=263, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/A/4c610dfe737e4c3d9d6783b4434bd449 2024-11-23T15:25:09,237 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/B/ec93126ea5cf40cf86d09b976ac3789c is 50, key is test_row_0/B:col10/1732375508394/Put/seqid=0 2024-11-23T15:25:09,241 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742020_1196 (size=12301) 2024-11-23T15:25:09,348 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:25:09,349 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-11-23T15:25:09,349 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. 2024-11-23T15:25:09,349 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. as already flushing 2024-11-23T15:25:09,349 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. 2024-11-23T15:25:09,349 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T15:25:09,349 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:25:09,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:25:09,501 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:25:09,502 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-11-23T15:25:09,502 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. 2024-11-23T15:25:09,502 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. as already flushing 2024-11-23T15:25:09,502 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. 2024-11-23T15:25:09,502 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:25:09,503 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:25:09,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:25:09,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-11-23T15:25:09,569 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:09,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34008 deadline: 1732375569569, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:09,569 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:09,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33992 deadline: 1732375569569, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:09,570 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:09,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34024 deadline: 1732375569569, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:09,642 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=263 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/B/ec93126ea5cf40cf86d09b976ac3789c 2024-11-23T15:25:09,650 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/C/742ad51e027349878ccc5146994c5e96 is 50, key is test_row_0/C:col10/1732375508394/Put/seqid=0 2024-11-23T15:25:09,654 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742021_1197 (size=12301) 2024-11-23T15:25:09,658 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:25:09,659 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-11-23T15:25:09,659 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. 2024-11-23T15:25:09,659 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. 
as already flushing 2024-11-23T15:25:09,659 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. 2024-11-23T15:25:09,659 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:25:09,659 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:25:09,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:25:09,811 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:25:09,812 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-11-23T15:25:09,812 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. 2024-11-23T15:25:09,812 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. as already flushing 2024-11-23T15:25:09,812 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. 2024-11-23T15:25:09,812 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:25:09,813 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:25:09,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:25:09,964 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:25:09,965 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-11-23T15:25:09,965 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. 2024-11-23T15:25:09,965 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. as already flushing 2024-11-23T15:25:09,965 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. 2024-11-23T15:25:09,965 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:25:09,966 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:25:09,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:25:10,055 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=263 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/C/742ad51e027349878ccc5146994c5e96 2024-11-23T15:25:10,060 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/A/4c610dfe737e4c3d9d6783b4434bd449 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/A/4c610dfe737e4c3d9d6783b4434bd449 2024-11-23T15:25:10,064 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/A/4c610dfe737e4c3d9d6783b4434bd449, entries=300, sequenceid=263, filesize=56.0 K 2024-11-23T15:25:10,065 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/B/ec93126ea5cf40cf86d09b976ac3789c as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/B/ec93126ea5cf40cf86d09b976ac3789c 2024-11-23T15:25:10,070 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/B/ec93126ea5cf40cf86d09b976ac3789c, entries=150, 
sequenceid=263, filesize=12.0 K 2024-11-23T15:25:10,071 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/C/742ad51e027349878ccc5146994c5e96 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/C/742ad51e027349878ccc5146994c5e96 2024-11-23T15:25:10,075 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/C/742ad51e027349878ccc5146994c5e96, entries=150, sequenceid=263, filesize=12.0 K 2024-11-23T15:25:10,076 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for f0440d31fa3d850bbc6f1938601d069f in 1680ms, sequenceid=263, compaction requested=false 2024-11-23T15:25:10,076 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for f0440d31fa3d850bbc6f1938601d069f: 2024-11-23T15:25:10,117 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:25:10,118 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-11-23T15:25:10,118 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. 
2024-11-23T15:25:10,118 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2837): Flushing f0440d31fa3d850bbc6f1938601d069f 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-23T15:25:10,119 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f0440d31fa3d850bbc6f1938601d069f, store=A 2024-11-23T15:25:10,119 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:25:10,119 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f0440d31fa3d850bbc6f1938601d069f, store=B 2024-11-23T15:25:10,119 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:25:10,119 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f0440d31fa3d850bbc6f1938601d069f, store=C 2024-11-23T15:25:10,119 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:25:10,126 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241123a0871cfa0de0437186aa44bfb3bc01b2_f0440d31fa3d850bbc6f1938601d069f is 50, key is test_row_0/A:col10/1732375508452/Put/seqid=0 2024-11-23T15:25:10,131 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742022_1198 (size=12454) 2024-11-23T15:25:10,532 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:10,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-11-23T15:25:10,536 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241123a0871cfa0de0437186aa44bfb3bc01b2_f0440d31fa3d850bbc6f1938601d069f to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123a0871cfa0de0437186aa44bfb3bc01b2_f0440d31fa3d850bbc6f1938601d069f 2024-11-23T15:25:10,537 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] mob.DefaultMobStoreFlusher(263): Flush store file: 
hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/A/2eede83c04b24e948e756f3d747f0e54, store: [table=TestAcidGuarantees family=A region=f0440d31fa3d850bbc6f1938601d069f] 2024-11-23T15:25:10,538 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/A/2eede83c04b24e948e756f3d747f0e54 is 175, key is test_row_0/A:col10/1732375508452/Put/seqid=0 2024-11-23T15:25:10,543 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742023_1199 (size=31255) 2024-11-23T15:25:10,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(8581): Flush requested on f0440d31fa3d850bbc6f1938601d069f 2024-11-23T15:25:10,576 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. as already flushing 2024-11-23T15:25:10,585 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:10,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34008 deadline: 1732375570583, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:10,585 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:10,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34024 deadline: 1732375570584, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:10,586 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:10,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33992 deadline: 1732375570585, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:10,687 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:10,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34008 deadline: 1732375570686, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:10,687 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:10,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34024 deadline: 1732375570686, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:10,688 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:10,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33992 deadline: 1732375570687, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:10,888 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:10,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34008 deadline: 1732375570888, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:10,889 DEBUG [Thread-631 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x247c0c93 to 127.0.0.1:62881 2024-11-23T15:25:10,889 DEBUG [Thread-631 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T15:25:10,890 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:10,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34024 deadline: 1732375570889, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:10,890 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:10,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 191 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33992 deadline: 1732375570890, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:10,891 DEBUG [Thread-633 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x517ff977 to 127.0.0.1:62881 2024-11-23T15:25:10,891 DEBUG [Thread-633 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T15:25:10,891 DEBUG [Thread-635 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3448d233 to 127.0.0.1:62881 2024-11-23T15:25:10,891 DEBUG [Thread-635 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T15:25:10,894 DEBUG [Thread-637 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7a11164b to 127.0.0.1:62881 2024-11-23T15:25:10,894 DEBUG [Thread-637 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T15:25:10,944 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=288, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/A/2eede83c04b24e948e756f3d747f0e54 2024-11-23T15:25:10,951 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/B/83ac198b2e15441b8d919132c10e131e is 50, key is test_row_0/B:col10/1732375508452/Put/seqid=0 2024-11-23T15:25:10,955 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742024_1200 (size=12301) 2024-11-23T15:25:11,191 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:11,192 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:11,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34024 deadline: 1732375571191, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:11,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 193 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33992 deadline: 1732375571191, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:11,192 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:11,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34008 deadline: 1732375571192, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:11,356 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=288 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/B/83ac198b2e15441b8d919132c10e131e 2024-11-23T15:25:11,363 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/C/7e8d9a499a154e909ae00788c9cade7b is 50, key is test_row_0/C:col10/1732375508452/Put/seqid=0 2024-11-23T15:25:11,367 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742025_1201 (size=12301) 2024-11-23T15:25:11,694 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:11,694 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:11,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34024 deadline: 1732375571694, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:11,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34008 deadline: 1732375571694, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:11,697 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:11,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 195 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33992 deadline: 1732375571697, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:11,767 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=288 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/C/7e8d9a499a154e909ae00788c9cade7b 2024-11-23T15:25:11,772 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/A/2eede83c04b24e948e756f3d747f0e54 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/A/2eede83c04b24e948e756f3d747f0e54 2024-11-23T15:25:11,776 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/A/2eede83c04b24e948e756f3d747f0e54, entries=150, sequenceid=288, filesize=30.5 K 2024-11-23T15:25:11,777 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/B/83ac198b2e15441b8d919132c10e131e as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/B/83ac198b2e15441b8d919132c10e131e 2024-11-23T15:25:11,780 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/B/83ac198b2e15441b8d919132c10e131e, entries=150, sequenceid=288, filesize=12.0 K 2024-11-23T15:25:11,781 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/C/7e8d9a499a154e909ae00788c9cade7b as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/C/7e8d9a499a154e909ae00788c9cade7b 2024-11-23T15:25:11,784 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/C/7e8d9a499a154e909ae00788c9cade7b, entries=150, sequenceid=288, filesize=12.0 K 2024-11-23T15:25:11,785 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for f0440d31fa3d850bbc6f1938601d069f in 1667ms, sequenceid=288, compaction requested=true 2024-11-23T15:25:11,785 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2538): Flush status journal for f0440d31fa3d850bbc6f1938601d069f: 2024-11-23T15:25:11,785 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. 
2024-11-23T15:25:11,785 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=58 2024-11-23T15:25:11,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4106): Remote procedure done, pid=58 2024-11-23T15:25:11,787 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=58, resume processing ppid=57 2024-11-23T15:25:11,787 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=58, ppid=57, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 3.3540 sec 2024-11-23T15:25:11,788 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=57, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=57, table=TestAcidGuarantees in 3.3580 sec 2024-11-23T15:25:12,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-11-23T15:25:12,537 INFO [Thread-630 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 57 completed 2024-11-23T15:25:12,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(8581): Flush requested on f0440d31fa3d850bbc6f1938601d069f 2024-11-23T15:25:12,697 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing f0440d31fa3d850bbc6f1938601d069f 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-23T15:25:12,697 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f0440d31fa3d850bbc6f1938601d069f, store=A 2024-11-23T15:25:12,697 DEBUG [Thread-620 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7cae6c5c to 127.0.0.1:62881 2024-11-23T15:25:12,697 DEBUG [Thread-620 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T15:25:12,697 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:25:12,697 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f0440d31fa3d850bbc6f1938601d069f, store=B 2024-11-23T15:25:12,697 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:25:12,697 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f0440d31fa3d850bbc6f1938601d069f, store=C 2024-11-23T15:25:12,697 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:25:12,698 DEBUG [Thread-622 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5c820ef9 to 127.0.0.1:62881 2024-11-23T15:25:12,698 DEBUG [Thread-622 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T15:25:12,703 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241123a4ea646a5f5e4a42a00e2a82a37b2227_f0440d31fa3d850bbc6f1938601d069f is 50, key is test_row_0/A:col10/1732375512695/Put/seqid=0 2024-11-23T15:25:12,706 DEBUG [Thread-626 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x42e904d8 to 127.0.0.1:62881 2024-11-23T15:25:12,706 DEBUG [Thread-626 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T15:25:12,707 
INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742026_1202 (size=12454) 2024-11-23T15:25:13,108 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:13,112 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241123a4ea646a5f5e4a42a00e2a82a37b2227_f0440d31fa3d850bbc6f1938601d069f to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123a4ea646a5f5e4a42a00e2a82a37b2227_f0440d31fa3d850bbc6f1938601d069f 2024-11-23T15:25:13,113 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/A/d73ba6aa04284c92941d3dafadb5b7ad, store: [table=TestAcidGuarantees family=A region=f0440d31fa3d850bbc6f1938601d069f] 2024-11-23T15:25:13,113 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/A/d73ba6aa04284c92941d3dafadb5b7ad is 175, key is test_row_0/A:col10/1732375512695/Put/seqid=0 2024-11-23T15:25:13,117 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742027_1203 (size=31255) 2024-11-23T15:25:13,518 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=301, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/A/d73ba6aa04284c92941d3dafadb5b7ad 2024-11-23T15:25:13,525 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/B/e827513f68764290b5fe082565fc9a16 is 50, key is test_row_0/B:col10/1732375512695/Put/seqid=0 2024-11-23T15:25:13,528 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742028_1204 (size=12301) 2024-11-23T15:25:13,929 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=301 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/B/e827513f68764290b5fe082565fc9a16 2024-11-23T15:25:13,935 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/C/487fadd1ddb245688f8b688333805507 is 50, key is test_row_0/C:col10/1732375512695/Put/seqid=0 2024-11-23T15:25:13,939 INFO [Block report processor {}] blockmanagement.BlockManager(3777): 
BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742029_1205 (size=12301) 2024-11-23T15:25:14,340 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=301 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/C/487fadd1ddb245688f8b688333805507 2024-11-23T15:25:14,344 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/A/d73ba6aa04284c92941d3dafadb5b7ad as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/A/d73ba6aa04284c92941d3dafadb5b7ad 2024-11-23T15:25:14,348 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/A/d73ba6aa04284c92941d3dafadb5b7ad, entries=150, sequenceid=301, filesize=30.5 K 2024-11-23T15:25:14,349 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/B/e827513f68764290b5fe082565fc9a16 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/B/e827513f68764290b5fe082565fc9a16 2024-11-23T15:25:14,352 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/B/e827513f68764290b5fe082565fc9a16, entries=150, sequenceid=301, filesize=12.0 K 2024-11-23T15:25:14,353 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/C/487fadd1ddb245688f8b688333805507 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/C/487fadd1ddb245688f8b688333805507 2024-11-23T15:25:14,357 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/C/487fadd1ddb245688f8b688333805507, entries=150, sequenceid=301, filesize=12.0 K 2024-11-23T15:25:14,357 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=13.42 KB/13740 for f0440d31fa3d850bbc6f1938601d069f in 1660ms, sequenceid=301, compaction requested=true 2024-11-23T15:25:14,357 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for f0440d31fa3d850bbc6f1938601d069f: 2024-11-23T15:25:14,358 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f0440d31fa3d850bbc6f1938601d069f:A, priority=-2147483648, current under compaction store size is 1 2024-11-23T15:25:14,358 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T15:25:14,358 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f0440d31fa3d850bbc6f1938601d069f:B, priority=-2147483648, current under compaction store size is 2 2024-11-23T15:25:14,358 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T15:25:14,358 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f0440d31fa3d850bbc6f1938601d069f:C, priority=-2147483648, current under compaction store size is 3 2024-11-23T15:25:14,358 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T15:25:14,358 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-23T15:25:14,358 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-23T15:25:14,359 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49600 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-23T15:25:14,359 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 151494 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-23T15:25:14,359 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1540): f0440d31fa3d850bbc6f1938601d069f/B is initiating minor compaction (all files) 2024-11-23T15:25:14,359 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f0440d31fa3d850bbc6f1938601d069f/B in TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. 2024-11-23T15:25:14,359 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HStore(1540): f0440d31fa3d850bbc6f1938601d069f/A is initiating minor compaction (all files) 2024-11-23T15:25:14,359 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f0440d31fa3d850bbc6f1938601d069f/A in TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. 
2024-11-23T15:25:14,359 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/B/26fb6472a4d84c80ab38b6b52faca71d, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/B/ec93126ea5cf40cf86d09b976ac3789c, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/B/83ac198b2e15441b8d919132c10e131e, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/B/e827513f68764290b5fe082565fc9a16] into tmpdir=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp, totalSize=48.4 K 2024-11-23T15:25:14,359 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/A/0df08b3cabaa4fd7b1c19018d6c4033c, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/A/4c610dfe737e4c3d9d6783b4434bd449, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/A/2eede83c04b24e948e756f3d747f0e54, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/A/d73ba6aa04284c92941d3dafadb5b7ad] into tmpdir=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp, totalSize=147.9 K 2024-11-23T15:25:14,359 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. 2024-11-23T15:25:14,359 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. 
files: [hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/A/0df08b3cabaa4fd7b1c19018d6c4033c, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/A/4c610dfe737e4c3d9d6783b4434bd449, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/A/2eede83c04b24e948e756f3d747f0e54, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/A/d73ba6aa04284c92941d3dafadb5b7ad] 2024-11-23T15:25:14,359 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting 26fb6472a4d84c80ab38b6b52faca71d, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=248, earliestPutTs=1732375506133 2024-11-23T15:25:14,360 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0df08b3cabaa4fd7b1c19018d6c4033c, keycount=150, bloomtype=ROW, size=30.9 K, encoding=NONE, compression=NONE, seqNum=248, earliestPutTs=1732375506133 2024-11-23T15:25:14,360 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting ec93126ea5cf40cf86d09b976ac3789c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=263, earliestPutTs=1732375508391 2024-11-23T15:25:14,360 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4c610dfe737e4c3d9d6783b4434bd449, keycount=300, bloomtype=ROW, size=56.0 K, encoding=NONE, compression=NONE, seqNum=263, earliestPutTs=1732375508361 2024-11-23T15:25:14,360 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting 83ac198b2e15441b8d919132c10e131e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=288, earliestPutTs=1732375508452 2024-11-23T15:25:14,360 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2eede83c04b24e948e756f3d747f0e54, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=288, earliestPutTs=1732375508452 2024-11-23T15:25:14,360 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting e827513f68764290b5fe082565fc9a16, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=301, earliestPutTs=1732375510582 2024-11-23T15:25:14,361 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.Compactor(224): Compacting d73ba6aa04284c92941d3dafadb5b7ad, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=301, earliestPutTs=1732375510582 2024-11-23T15:25:14,370 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): f0440d31fa3d850bbc6f1938601d069f#B#compaction#168 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T15:25:14,370 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/B/798c46cc9e684c34a62da64fbcf8ff8b is 50, key is test_row_0/B:col10/1732375512695/Put/seqid=0 2024-11-23T15:25:14,374 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=f0440d31fa3d850bbc6f1938601d069f] 2024-11-23T15:25:14,375 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742030_1206 (size=12983) 2024-11-23T15:25:14,376 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241123b9f63a9d5d8748fda75f346cbc383eb1_f0440d31fa3d850bbc6f1938601d069f store=[table=TestAcidGuarantees family=A region=f0440d31fa3d850bbc6f1938601d069f] 2024-11-23T15:25:14,402 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241123b9f63a9d5d8748fda75f346cbc383eb1_f0440d31fa3d850bbc6f1938601d069f, store=[table=TestAcidGuarantees family=A region=f0440d31fa3d850bbc6f1938601d069f] 2024-11-23T15:25:14,403 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241123b9f63a9d5d8748fda75f346cbc383eb1_f0440d31fa3d850bbc6f1938601d069f because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=f0440d31fa3d850bbc6f1938601d069f] 2024-11-23T15:25:14,407 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742031_1207 (size=4469) 2024-11-23T15:25:14,780 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/B/798c46cc9e684c34a62da64fbcf8ff8b as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/B/798c46cc9e684c34a62da64fbcf8ff8b 2024-11-23T15:25:14,785 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in f0440d31fa3d850bbc6f1938601d069f/B of f0440d31fa3d850bbc6f1938601d069f into 798c46cc9e684c34a62da64fbcf8ff8b(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T15:25:14,785 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f0440d31fa3d850bbc6f1938601d069f: 2024-11-23T15:25:14,785 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f., storeName=f0440d31fa3d850bbc6f1938601d069f/B, priority=12, startTime=1732375514358; duration=0sec 2024-11-23T15:25:14,785 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T15:25:14,785 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f0440d31fa3d850bbc6f1938601d069f:B 2024-11-23T15:25:14,785 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-23T15:25:14,786 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49600 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-23T15:25:14,786 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1540): f0440d31fa3d850bbc6f1938601d069f/C is initiating minor compaction (all files) 2024-11-23T15:25:14,786 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f0440d31fa3d850bbc6f1938601d069f/C in TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. 2024-11-23T15:25:14,786 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/C/cfab3da7ee054e86af256cef828af6a9, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/C/742ad51e027349878ccc5146994c5e96, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/C/7e8d9a499a154e909ae00788c9cade7b, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/C/487fadd1ddb245688f8b688333805507] into tmpdir=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp, totalSize=48.4 K 2024-11-23T15:25:14,787 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting cfab3da7ee054e86af256cef828af6a9, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=248, earliestPutTs=1732375506133 2024-11-23T15:25:14,787 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting 742ad51e027349878ccc5146994c5e96, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=263, earliestPutTs=1732375508391 2024-11-23T15:25:14,787 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting 7e8d9a499a154e909ae00788c9cade7b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, 
compression=NONE, seqNum=288, earliestPutTs=1732375508452 2024-11-23T15:25:14,787 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting 487fadd1ddb245688f8b688333805507, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=301, earliestPutTs=1732375510582 2024-11-23T15:25:14,796 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): f0440d31fa3d850bbc6f1938601d069f#C#compaction#170 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-23T15:25:14,797 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/C/b6ae061d5e0648b484cd18757f7e66f2 is 50, key is test_row_0/C:col10/1732375512695/Put/seqid=0 2024-11-23T15:25:14,801 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742032_1208 (size=12983) 2024-11-23T15:25:14,808 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): f0440d31fa3d850bbc6f1938601d069f#A#compaction#169 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T15:25:14,809 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/A/ffffb113097b432899f0ea17761a8937 is 175, key is test_row_0/A:col10/1732375512695/Put/seqid=0 2024-11-23T15:25:14,813 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742033_1209 (size=31937) 2024-11-23T15:25:15,206 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/C/b6ae061d5e0648b484cd18757f7e66f2 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/C/b6ae061d5e0648b484cd18757f7e66f2 2024-11-23T15:25:15,211 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in f0440d31fa3d850bbc6f1938601d069f/C of f0440d31fa3d850bbc6f1938601d069f into b6ae061d5e0648b484cd18757f7e66f2(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T15:25:15,211 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f0440d31fa3d850bbc6f1938601d069f: 2024-11-23T15:25:15,211 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f., storeName=f0440d31fa3d850bbc6f1938601d069f/C, priority=12, startTime=1732375514358; duration=0sec 2024-11-23T15:25:15,211 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T15:25:15,211 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f0440d31fa3d850bbc6f1938601d069f:C 2024-11-23T15:25:15,218 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/A/ffffb113097b432899f0ea17761a8937 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/A/ffffb113097b432899f0ea17761a8937 2024-11-23T15:25:15,222 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in f0440d31fa3d850bbc6f1938601d069f/A of f0440d31fa3d850bbc6f1938601d069f into ffffb113097b432899f0ea17761a8937(size=31.2 K), total size for store is 31.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T15:25:15,222 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f0440d31fa3d850bbc6f1938601d069f: 2024-11-23T15:25:15,222 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f., storeName=f0440d31fa3d850bbc6f1938601d069f/A, priority=12, startTime=1732375514357; duration=0sec 2024-11-23T15:25:15,222 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T15:25:15,222 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f0440d31fa3d850bbc6f1938601d069f:A 2024-11-23T15:25:15,402 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-23T15:25:19,230 DEBUG [Thread-628 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0a4c53ed to 127.0.0.1:62881 2024-11-23T15:25:19,230 DEBUG [Thread-628 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T15:25:19,273 DEBUG [Thread-624 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0b44b1e5 to 127.0.0.1:62881 2024-11-23T15:25:19,273 DEBUG [Thread-624 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T15:25:19,274 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. 
Writers: 2024-11-23T15:25:19,274 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 68 2024-11-23T15:25:19,274 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 74 2024-11-23T15:25:19,274 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 7 2024-11-23T15:25:19,274 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 82 2024-11-23T15:25:19,274 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 9 2024-11-23T15:25:19,274 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-11-23T15:25:19,274 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 7833 2024-11-23T15:25:19,274 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 7665 2024-11-23T15:25:19,274 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-11-23T15:25:19,274 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 3244 2024-11-23T15:25:19,274 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 9732 rows 2024-11-23T15:25:19,274 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 3254 2024-11-23T15:25:19,274 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 9762 rows 2024-11-23T15:25:19,274 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-11-23T15:25:19,274 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7a9b9802 to 127.0.0.1:62881 2024-11-23T15:25:19,274 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T15:25:19,279 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-11-23T15:25:19,280 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-11-23T15:25:19,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] procedure2.ProcedureExecutor(1098): Stored pid=59, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-11-23T15:25:19,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-11-23T15:25:19,283 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732375519283"}]},"ts":"1732375519283"} 2024-11-23T15:25:19,285 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-11-23T15:25:19,287 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-11-23T15:25:19,287 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=60, ppid=59, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-23T15:25:19,288 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=61, ppid=60, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=f0440d31fa3d850bbc6f1938601d069f, UNASSIGN}] 2024-11-23T15:25:19,289 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=61, ppid=60, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure 
table=TestAcidGuarantees, region=f0440d31fa3d850bbc6f1938601d069f, UNASSIGN 2024-11-23T15:25:19,290 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=61 updating hbase:meta row=f0440d31fa3d850bbc6f1938601d069f, regionState=CLOSING, regionLocation=6a36843bf905,33811,1732375456985 2024-11-23T15:25:19,290 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-23T15:25:19,290 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=62, ppid=61, state=RUNNABLE; CloseRegionProcedure f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985}] 2024-11-23T15:25:19,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-11-23T15:25:19,442 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:25:19,442 INFO [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] handler.UnassignRegionHandler(124): Close f0440d31fa3d850bbc6f1938601d069f 2024-11-23T15:25:19,442 DEBUG [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-23T15:25:19,442 DEBUG [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.HRegion(1681): Closing f0440d31fa3d850bbc6f1938601d069f, disabling compactions & flushes 2024-11-23T15:25:19,442 INFO [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. 2024-11-23T15:25:19,443 DEBUG [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. 2024-11-23T15:25:19,443 DEBUG [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. after waiting 0 ms 2024-11-23T15:25:19,443 DEBUG [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. 
2024-11-23T15:25:19,443 INFO [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.HRegion(2837): Flushing f0440d31fa3d850bbc6f1938601d069f 3/3 column families, dataSize=26.84 KB heapSize=71.06 KB 2024-11-23T15:25:19,443 DEBUG [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f0440d31fa3d850bbc6f1938601d069f, store=A 2024-11-23T15:25:19,443 DEBUG [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:25:19,443 DEBUG [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f0440d31fa3d850bbc6f1938601d069f, store=B 2024-11-23T15:25:19,443 DEBUG [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:25:19,443 DEBUG [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f0440d31fa3d850bbc6f1938601d069f, store=C 2024-11-23T15:25:19,443 DEBUG [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:25:19,450 DEBUG [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241123c4ed605093ae4fd5bce25ece44891285_f0440d31fa3d850bbc6f1938601d069f is 50, key is test_row_0/A:col10/1732375519229/Put/seqid=0 2024-11-23T15:25:19,453 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742034_1210 (size=12454) 2024-11-23T15:25:19,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-11-23T15:25:19,854 DEBUG [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:19,859 INFO [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241123c4ed605093ae4fd5bce25ece44891285_f0440d31fa3d850bbc6f1938601d069f to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123c4ed605093ae4fd5bce25ece44891285_f0440d31fa3d850bbc6f1938601d069f 2024-11-23T15:25:19,860 DEBUG [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] mob.DefaultMobStoreFlusher(263): Flush store file: 
hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/A/01ca690995a246acb81f4d52b6c0ff5b, store: [table=TestAcidGuarantees family=A region=f0440d31fa3d850bbc6f1938601d069f] 2024-11-23T15:25:19,860 DEBUG [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/A/01ca690995a246acb81f4d52b6c0ff5b is 175, key is test_row_0/A:col10/1732375519229/Put/seqid=0 2024-11-23T15:25:19,864 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742035_1211 (size=31255) 2024-11-23T15:25:19,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-11-23T15:25:20,265 INFO [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=311, memsize=8.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/A/01ca690995a246acb81f4d52b6c0ff5b 2024-11-23T15:25:20,272 DEBUG [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/B/8993c4bb22c449e0bdec51d3c9c201f1 is 50, key is test_row_0/B:col10/1732375519229/Put/seqid=0 2024-11-23T15:25:20,276 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742036_1212 (size=12301) 2024-11-23T15:25:20,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-11-23T15:25:20,677 INFO [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=311 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/B/8993c4bb22c449e0bdec51d3c9c201f1 2024-11-23T15:25:20,684 DEBUG [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/C/8860eef92cd3481882190d8fb4289e73 is 50, key is test_row_0/C:col10/1732375519229/Put/seqid=0 2024-11-23T15:25:20,688 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742037_1213 (size=12301) 2024-11-23T15:25:21,088 INFO [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=311 (bloomFilter=true), 
to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/C/8860eef92cd3481882190d8fb4289e73 2024-11-23T15:25:21,093 DEBUG [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/A/01ca690995a246acb81f4d52b6c0ff5b as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/A/01ca690995a246acb81f4d52b6c0ff5b 2024-11-23T15:25:21,097 INFO [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/A/01ca690995a246acb81f4d52b6c0ff5b, entries=150, sequenceid=311, filesize=30.5 K 2024-11-23T15:25:21,097 DEBUG [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/B/8993c4bb22c449e0bdec51d3c9c201f1 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/B/8993c4bb22c449e0bdec51d3c9c201f1 2024-11-23T15:25:21,101 INFO [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/B/8993c4bb22c449e0bdec51d3c9c201f1, entries=150, sequenceid=311, filesize=12.0 K 2024-11-23T15:25:21,102 DEBUG [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/.tmp/C/8860eef92cd3481882190d8fb4289e73 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/C/8860eef92cd3481882190d8fb4289e73 2024-11-23T15:25:21,105 INFO [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/C/8860eef92cd3481882190d8fb4289e73, entries=150, sequenceid=311, filesize=12.0 K 2024-11-23T15:25:21,106 INFO [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.HRegion(3040): Finished flush of dataSize ~26.84 KB/27480, heapSize ~71.02 KB/72720, currentSize=0 B/0 for f0440d31fa3d850bbc6f1938601d069f in 1663ms, sequenceid=311, compaction requested=false 2024-11-23T15:25:21,107 DEBUG [StoreCloser-TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.-1 {}] regionserver.HStore(2316): Moving the files 
[hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/A/12b8eef63a044b328496d4c655ef2418, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/A/ce3a025a03394b8f879fe2c36f1aa7d2, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/A/2021aa193efb4b408d3c101c8546e417, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/A/d0d39c0b2a5a424299056198090a18a1, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/A/e64b29092e7246b3b1d2fc52954c57b4, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/A/45820ca9b5ec4658b00b5f8bdff63770, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/A/bd5f8701470d4df5b82a94b4f873deda, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/A/d66fac1d261c4dd9aaccdf5b66bfff4f, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/A/b405604ebba64d579afb9585a76e49b1, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/A/a00091b266fb4242aea95ed99a1d4b33, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/A/680681f3d81c45fd844f6c7f7e5129a9, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/A/f8ad57a9d58c44279516c5fd99d9ecd8, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/A/ddb8acc43fb141e789883e8abc249025, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/A/afb72efd9f0f4ea2849d6189909a7afd, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/A/9e760a991c3e4b7ba22054ff0c45da93, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/A/774d9c7982214e60bbd09eb5d1f7bda8, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/A/0df08b3cabaa4fd7b1c19018d6c4033c, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/A/4c610dfe737e4c3d9d6783b4434bd449, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/A/2eede83c04b24e948e756f3d747f0e54, 
hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/A/d73ba6aa04284c92941d3dafadb5b7ad] to archive 2024-11-23T15:25:21,108 DEBUG [StoreCloser-TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-23T15:25:21,109 DEBUG [StoreCloser-TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/A/12b8eef63a044b328496d4c655ef2418 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/A/12b8eef63a044b328496d4c655ef2418 2024-11-23T15:25:21,111 DEBUG [StoreCloser-TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/A/ce3a025a03394b8f879fe2c36f1aa7d2 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/A/ce3a025a03394b8f879fe2c36f1aa7d2 2024-11-23T15:25:21,112 DEBUG [StoreCloser-TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/A/2021aa193efb4b408d3c101c8546e417 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/A/2021aa193efb4b408d3c101c8546e417 2024-11-23T15:25:21,113 DEBUG [StoreCloser-TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/A/d0d39c0b2a5a424299056198090a18a1 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/A/d0d39c0b2a5a424299056198090a18a1 2024-11-23T15:25:21,114 DEBUG [StoreCloser-TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/A/e64b29092e7246b3b1d2fc52954c57b4 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/A/e64b29092e7246b3b1d2fc52954c57b4 2024-11-23T15:25:21,115 DEBUG [StoreCloser-TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/A/45820ca9b5ec4658b00b5f8bdff63770 to 
hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/A/45820ca9b5ec4658b00b5f8bdff63770 2024-11-23T15:25:21,117 DEBUG [StoreCloser-TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/A/bd5f8701470d4df5b82a94b4f873deda to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/A/bd5f8701470d4df5b82a94b4f873deda 2024-11-23T15:25:21,118 DEBUG [StoreCloser-TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/A/d66fac1d261c4dd9aaccdf5b66bfff4f to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/A/d66fac1d261c4dd9aaccdf5b66bfff4f 2024-11-23T15:25:21,119 DEBUG [StoreCloser-TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/A/b405604ebba64d579afb9585a76e49b1 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/A/b405604ebba64d579afb9585a76e49b1 2024-11-23T15:25:21,120 DEBUG [StoreCloser-TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/A/a00091b266fb4242aea95ed99a1d4b33 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/A/a00091b266fb4242aea95ed99a1d4b33 2024-11-23T15:25:21,122 DEBUG [StoreCloser-TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/A/680681f3d81c45fd844f6c7f7e5129a9 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/A/680681f3d81c45fd844f6c7f7e5129a9 2024-11-23T15:25:21,123 DEBUG [StoreCloser-TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/A/f8ad57a9d58c44279516c5fd99d9ecd8 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/A/f8ad57a9d58c44279516c5fd99d9ecd8 2024-11-23T15:25:21,124 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/A/ddb8acc43fb141e789883e8abc249025 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/A/ddb8acc43fb141e789883e8abc249025 2024-11-23T15:25:21,125 DEBUG [StoreCloser-TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/A/afb72efd9f0f4ea2849d6189909a7afd to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/A/afb72efd9f0f4ea2849d6189909a7afd 2024-11-23T15:25:21,126 DEBUG [StoreCloser-TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/A/9e760a991c3e4b7ba22054ff0c45da93 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/A/9e760a991c3e4b7ba22054ff0c45da93 2024-11-23T15:25:21,127 DEBUG [StoreCloser-TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/A/774d9c7982214e60bbd09eb5d1f7bda8 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/A/774d9c7982214e60bbd09eb5d1f7bda8 2024-11-23T15:25:21,128 DEBUG [StoreCloser-TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/A/0df08b3cabaa4fd7b1c19018d6c4033c to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/A/0df08b3cabaa4fd7b1c19018d6c4033c 2024-11-23T15:25:21,129 DEBUG [StoreCloser-TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/A/4c610dfe737e4c3d9d6783b4434bd449 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/A/4c610dfe737e4c3d9d6783b4434bd449 2024-11-23T15:25:21,130 DEBUG [StoreCloser-TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/A/2eede83c04b24e948e756f3d747f0e54 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/A/2eede83c04b24e948e756f3d747f0e54 2024-11-23T15:25:21,131 DEBUG [StoreCloser-TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/A/d73ba6aa04284c92941d3dafadb5b7ad to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/A/d73ba6aa04284c92941d3dafadb5b7ad 2024-11-23T15:25:21,133 DEBUG [StoreCloser-TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/B/5d215038b9f243ae80156d480a0f0a79, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/B/70971355dde546e3844b4abf11c04472, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/B/555d6e44cd5b4de591f446b830b9e552, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/B/d17af16858704b1dbf3febd6cef27166, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/B/deff05ae80404b0c98211e57a668fa35, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/B/298eb9a657564231815973713e64a870, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/B/71baec5dfa0a4c9489c3bfd1a9395675, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/B/89574a63d5e74416949991dd137234a5, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/B/7d96ff53eabf47b9bf5ae7b90e4f8082, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/B/3ef43b17405b4d04ad6855c7326332a4, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/B/b034885a5d8b4d64b0d180515b63f7a6, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/B/9cce9a186c1c42448d3fe5cfb3a8055e, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/B/ed5b4a0cf262438aba81a1850018872e, 
hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/B/b6a4b9d01d9c41ac883d9a634e640e41, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/B/877636693685408589e23d71994e40ec, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/B/26fb6472a4d84c80ab38b6b52faca71d, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/B/a897bfcc1f414244be667bedb5cedb63, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/B/ec93126ea5cf40cf86d09b976ac3789c, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/B/83ac198b2e15441b8d919132c10e131e, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/B/e827513f68764290b5fe082565fc9a16] to archive 2024-11-23T15:25:21,133 DEBUG [StoreCloser-TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-23T15:25:21,135 DEBUG [StoreCloser-TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/B/5d215038b9f243ae80156d480a0f0a79 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/B/5d215038b9f243ae80156d480a0f0a79 2024-11-23T15:25:21,136 DEBUG [StoreCloser-TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/B/70971355dde546e3844b4abf11c04472 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/B/70971355dde546e3844b4abf11c04472 2024-11-23T15:25:21,137 DEBUG [StoreCloser-TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/B/555d6e44cd5b4de591f446b830b9e552 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/B/555d6e44cd5b4de591f446b830b9e552 2024-11-23T15:25:21,138 DEBUG [StoreCloser-TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/B/d17af16858704b1dbf3febd6cef27166 to 
hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/B/d17af16858704b1dbf3febd6cef27166 2024-11-23T15:25:21,139 DEBUG [StoreCloser-TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/B/deff05ae80404b0c98211e57a668fa35 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/B/deff05ae80404b0c98211e57a668fa35 2024-11-23T15:25:21,140 DEBUG [StoreCloser-TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/B/298eb9a657564231815973713e64a870 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/B/298eb9a657564231815973713e64a870 2024-11-23T15:25:21,141 DEBUG [StoreCloser-TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/B/71baec5dfa0a4c9489c3bfd1a9395675 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/B/71baec5dfa0a4c9489c3bfd1a9395675 2024-11-23T15:25:21,142 DEBUG [StoreCloser-TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/B/89574a63d5e74416949991dd137234a5 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/B/89574a63d5e74416949991dd137234a5 2024-11-23T15:25:21,143 DEBUG [StoreCloser-TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/B/7d96ff53eabf47b9bf5ae7b90e4f8082 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/B/7d96ff53eabf47b9bf5ae7b90e4f8082 2024-11-23T15:25:21,144 DEBUG [StoreCloser-TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/B/3ef43b17405b4d04ad6855c7326332a4 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/B/3ef43b17405b4d04ad6855c7326332a4 2024-11-23T15:25:21,145 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/B/b034885a5d8b4d64b0d180515b63f7a6 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/B/b034885a5d8b4d64b0d180515b63f7a6 2024-11-23T15:25:21,147 DEBUG [StoreCloser-TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/B/9cce9a186c1c42448d3fe5cfb3a8055e to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/B/9cce9a186c1c42448d3fe5cfb3a8055e 2024-11-23T15:25:21,148 DEBUG [StoreCloser-TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/B/ed5b4a0cf262438aba81a1850018872e to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/B/ed5b4a0cf262438aba81a1850018872e 2024-11-23T15:25:21,149 DEBUG [StoreCloser-TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/B/b6a4b9d01d9c41ac883d9a634e640e41 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/B/b6a4b9d01d9c41ac883d9a634e640e41 2024-11-23T15:25:21,150 DEBUG [StoreCloser-TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/B/877636693685408589e23d71994e40ec to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/B/877636693685408589e23d71994e40ec 2024-11-23T15:25:21,151 DEBUG [StoreCloser-TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/B/26fb6472a4d84c80ab38b6b52faca71d to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/B/26fb6472a4d84c80ab38b6b52faca71d 2024-11-23T15:25:21,152 DEBUG [StoreCloser-TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/B/a897bfcc1f414244be667bedb5cedb63 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/B/a897bfcc1f414244be667bedb5cedb63 2024-11-23T15:25:21,153 DEBUG [StoreCloser-TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/B/ec93126ea5cf40cf86d09b976ac3789c to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/B/ec93126ea5cf40cf86d09b976ac3789c 2024-11-23T15:25:21,154 DEBUG [StoreCloser-TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/B/83ac198b2e15441b8d919132c10e131e to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/B/83ac198b2e15441b8d919132c10e131e 2024-11-23T15:25:21,155 DEBUG [StoreCloser-TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/B/e827513f68764290b5fe082565fc9a16 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/B/e827513f68764290b5fe082565fc9a16 2024-11-23T15:25:21,157 DEBUG [StoreCloser-TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/C/0421148a5b2c4da2baf921cf3debb80f, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/C/4b8f78c184ec45e3bd5bcf1001c0dbd5, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/C/7e3396e9ca7440e7a5daff27dd1714dd, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/C/fb95697ec3bf4f9aaa9581d694843c76, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/C/7fb6568336694dcdbec250d14a3e5478, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/C/5ab12513226643d8b80903e69caca199, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/C/42f0fbf771194f1bb357f6514afaf4a6, 
hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/C/bdf9429412824d6fa5a8ba6f98b89974, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/C/3368444e635840d0ad653085e32d5434, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/C/2252e39be126445f8232aa37e2f8c966, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/C/8b2d992d51954a8da937bd9f35a8b71b, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/C/19d9fc02a14c4f048b6cd132f103e61c, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/C/2cae9a492b6d4ee6a1e53dee31a14966, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/C/b1c4cf000d1845d09ff574d66f54595f, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/C/e16cc2f6fd2d4fb8839506acde182261, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/C/cfab3da7ee054e86af256cef828af6a9, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/C/298722fc71dd48538fee6f6c76b272d1, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/C/742ad51e027349878ccc5146994c5e96, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/C/7e8d9a499a154e909ae00788c9cade7b, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/C/487fadd1ddb245688f8b688333805507] to archive 2024-11-23T15:25:21,157 DEBUG [StoreCloser-TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
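Aside: the "Moving the files [...] to archive" / "Archived from FileableStoreFile, <src> to <dst>" pairs above all follow one path convention: a store file under <root>/data/default/<table>/<region>/<family>/ moves to the same relative location under <root>/archive/data/default/<table>/<region>/<family>/. A minimal sketch of that mapping as plain string manipulation (this is not HBase's HFileArchiver code; only the base paths and one file name are taken from the log):

public class ArchivePathSketch {
    // Rewrites a store-file path under <root>/data/... to its <root>/archive/data/... twin,
    // mirroring the source/destination pairs logged by backup.HFileArchiver(596).
    static String toArchivePath(String rootDir, String storeFilePath) {
        String relative = storeFilePath.substring(rootDir.length()); // "/data/default/<table>/<region>/<cf>/<file>"
        return rootDir + "/archive" + relative;
    }

    public static void main(String[] args) {
        String root = "hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704";
        String src = root + "/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/C/0421148a5b2c4da2baf921cf3debb80f";
        // Prints the .../archive/data/default/TestAcidGuarantees/.../C/0421148a5b2c4da2baf921cf3debb80f path seen in the log.
        System.out.println(toArchivePath(root, src));
    }
}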
2024-11-23T15:25:21,159 DEBUG [StoreCloser-TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/C/0421148a5b2c4da2baf921cf3debb80f to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/C/0421148a5b2c4da2baf921cf3debb80f 2024-11-23T15:25:21,160 DEBUG [StoreCloser-TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/C/4b8f78c184ec45e3bd5bcf1001c0dbd5 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/C/4b8f78c184ec45e3bd5bcf1001c0dbd5 2024-11-23T15:25:21,161 DEBUG [StoreCloser-TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/C/7e3396e9ca7440e7a5daff27dd1714dd to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/C/7e3396e9ca7440e7a5daff27dd1714dd 2024-11-23T15:25:21,162 DEBUG [StoreCloser-TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/C/fb95697ec3bf4f9aaa9581d694843c76 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/C/fb95697ec3bf4f9aaa9581d694843c76 2024-11-23T15:25:21,163 DEBUG [StoreCloser-TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/C/7fb6568336694dcdbec250d14a3e5478 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/C/7fb6568336694dcdbec250d14a3e5478 2024-11-23T15:25:21,164 DEBUG [StoreCloser-TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/C/5ab12513226643d8b80903e69caca199 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/C/5ab12513226643d8b80903e69caca199 2024-11-23T15:25:21,165 DEBUG [StoreCloser-TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/C/42f0fbf771194f1bb357f6514afaf4a6 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/C/42f0fbf771194f1bb357f6514afaf4a6 2024-11-23T15:25:21,166 DEBUG [StoreCloser-TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/C/bdf9429412824d6fa5a8ba6f98b89974 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/C/bdf9429412824d6fa5a8ba6f98b89974 2024-11-23T15:25:21,167 DEBUG [StoreCloser-TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/C/3368444e635840d0ad653085e32d5434 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/C/3368444e635840d0ad653085e32d5434 2024-11-23T15:25:21,168 DEBUG [StoreCloser-TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/C/2252e39be126445f8232aa37e2f8c966 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/C/2252e39be126445f8232aa37e2f8c966 2024-11-23T15:25:21,169 DEBUG [StoreCloser-TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/C/8b2d992d51954a8da937bd9f35a8b71b to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/C/8b2d992d51954a8da937bd9f35a8b71b 2024-11-23T15:25:21,170 DEBUG [StoreCloser-TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/C/19d9fc02a14c4f048b6cd132f103e61c to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/C/19d9fc02a14c4f048b6cd132f103e61c 2024-11-23T15:25:21,171 DEBUG [StoreCloser-TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/C/2cae9a492b6d4ee6a1e53dee31a14966 to 
hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/C/2cae9a492b6d4ee6a1e53dee31a14966 2024-11-23T15:25:21,172 DEBUG [StoreCloser-TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/C/b1c4cf000d1845d09ff574d66f54595f to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/C/b1c4cf000d1845d09ff574d66f54595f 2024-11-23T15:25:21,172 DEBUG [StoreCloser-TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/C/e16cc2f6fd2d4fb8839506acde182261 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/C/e16cc2f6fd2d4fb8839506acde182261 2024-11-23T15:25:21,173 DEBUG [StoreCloser-TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/C/cfab3da7ee054e86af256cef828af6a9 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/C/cfab3da7ee054e86af256cef828af6a9 2024-11-23T15:25:21,174 DEBUG [StoreCloser-TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/C/298722fc71dd48538fee6f6c76b272d1 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/C/298722fc71dd48538fee6f6c76b272d1 2024-11-23T15:25:21,175 DEBUG [StoreCloser-TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/C/742ad51e027349878ccc5146994c5e96 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/C/742ad51e027349878ccc5146994c5e96 2024-11-23T15:25:21,176 DEBUG [StoreCloser-TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/C/7e8d9a499a154e909ae00788c9cade7b to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/C/7e8d9a499a154e909ae00788c9cade7b 2024-11-23T15:25:21,177 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/C/487fadd1ddb245688f8b688333805507 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/C/487fadd1ddb245688f8b688333805507 2024-11-23T15:25:21,181 DEBUG [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/recovered.edits/314.seqid, newMaxSeqId=314, maxSeqId=4 2024-11-23T15:25:21,181 INFO [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f. 2024-11-23T15:25:21,181 DEBUG [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.HRegion(1635): Region close journal for f0440d31fa3d850bbc6f1938601d069f: 2024-11-23T15:25:21,183 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=61 updating hbase:meta row=f0440d31fa3d850bbc6f1938601d069f, regionState=CLOSED 2024-11-23T15:25:21,184 INFO [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] handler.UnassignRegionHandler(170): Closed f0440d31fa3d850bbc6f1938601d069f 2024-11-23T15:25:21,185 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=62, resume processing ppid=61 2024-11-23T15:25:21,185 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=62, ppid=61, state=SUCCESS; CloseRegionProcedure f0440d31fa3d850bbc6f1938601d069f, server=6a36843bf905,33811,1732375456985 in 1.8940 sec 2024-11-23T15:25:21,187 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=61, resume processing ppid=60 2024-11-23T15:25:21,187 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=61, ppid=60, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=f0440d31fa3d850bbc6f1938601d069f, UNASSIGN in 1.8970 sec 2024-11-23T15:25:21,188 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=60, resume processing ppid=59 2024-11-23T15:25:21,188 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=60, ppid=59, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.9000 sec 2024-11-23T15:25:21,189 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732375521189"}]},"ts":"1732375521189"} 2024-11-23T15:25:21,190 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-11-23T15:25:21,192 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-11-23T15:25:21,193 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=59, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.9120 sec 2024-11-23T15:25:21,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if 
procedure is done pid=59 2024-11-23T15:25:21,387 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 59 completed 2024-11-23T15:25:21,387 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-11-23T15:25:21,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] procedure2.ProcedureExecutor(1098): Stored pid=63, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-11-23T15:25:21,389 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=63, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-23T15:25:21,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-11-23T15:25:21,390 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=63, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-23T15:25:21,391 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f 2024-11-23T15:25:21,393 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/A, FileablePath, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/B, FileablePath, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/C, FileablePath, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/recovered.edits] 2024-11-23T15:25:21,396 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/A/01ca690995a246acb81f4d52b6c0ff5b to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/A/01ca690995a246acb81f4d52b6c0ff5b 2024-11-23T15:25:21,398 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/A/ffffb113097b432899f0ea17761a8937 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/A/ffffb113097b432899f0ea17761a8937 2024-11-23T15:25:21,400 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/B/798c46cc9e684c34a62da64fbcf8ff8b to 
hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/B/798c46cc9e684c34a62da64fbcf8ff8b 2024-11-23T15:25:21,401 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/B/8993c4bb22c449e0bdec51d3c9c201f1 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/B/8993c4bb22c449e0bdec51d3c9c201f1 2024-11-23T15:25:21,411 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/C/8860eef92cd3481882190d8fb4289e73 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/C/8860eef92cd3481882190d8fb4289e73 2024-11-23T15:25:21,412 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/C/b6ae061d5e0648b484cd18757f7e66f2 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/C/b6ae061d5e0648b484cd18757f7e66f2 2024-11-23T15:25:21,415 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/recovered.edits/314.seqid to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f/recovered.edits/314.seqid 2024-11-23T15:25:21,415 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/f0440d31fa3d850bbc6f1938601d069f 2024-11-23T15:25:21,415 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-11-23T15:25:21,416 DEBUG [PEWorker-1 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-11-23T15:25:21,416 DEBUG [PEWorker-1 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A] 2024-11-23T15:25:21,420 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411230a1b445a793d49f8aa8e4e4d4ab1757b_f0440d31fa3d850bbc6f1938601d069f to 
hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411230a1b445a793d49f8aa8e4e4d4ab1757b_f0440d31fa3d850bbc6f1938601d069f 2024-11-23T15:25:21,421 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112311b55d67cb6a48afa4de98197b5fa333_f0440d31fa3d850bbc6f1938601d069f to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112311b55d67cb6a48afa4de98197b5fa333_f0440d31fa3d850bbc6f1938601d069f 2024-11-23T15:25:21,422 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112311f893ffd172452985402b7efb126a21_f0440d31fa3d850bbc6f1938601d069f to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112311f893ffd172452985402b7efb126a21_f0440d31fa3d850bbc6f1938601d069f 2024-11-23T15:25:21,423 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112315fecaf745ac409e9498f7d6d764c1f2_f0440d31fa3d850bbc6f1938601d069f to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112315fecaf745ac409e9498f7d6d764c1f2_f0440d31fa3d850bbc6f1938601d069f 2024-11-23T15:25:21,425 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123168ca47957b6472fa701b06523b52a1b_f0440d31fa3d850bbc6f1938601d069f to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123168ca47957b6472fa701b06523b52a1b_f0440d31fa3d850bbc6f1938601d069f 2024-11-23T15:25:21,426 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411232e75c168c91f4bd1a279a0e51db8d633_f0440d31fa3d850bbc6f1938601d069f to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411232e75c168c91f4bd1a279a0e51db8d633_f0440d31fa3d850bbc6f1938601d069f 2024-11-23T15:25:21,427 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411235bb0167a12cc4afba780d51ea690bec5_f0440d31fa3d850bbc6f1938601d069f to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411235bb0167a12cc4afba780d51ea690bec5_f0440d31fa3d850bbc6f1938601d069f 2024-11-23T15:25:21,428 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411235c56f3a77ed544c881eff556493721e6_f0440d31fa3d850bbc6f1938601d069f to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411235c56f3a77ed544c881eff556493721e6_f0440d31fa3d850bbc6f1938601d069f 2024-11-23T15:25:21,430 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123631a26c9998e4d3d97df0b5ec3ab5c9c_f0440d31fa3d850bbc6f1938601d069f to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123631a26c9998e4d3d97df0b5ec3ab5c9c_f0440d31fa3d850bbc6f1938601d069f 2024-11-23T15:25:21,431 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123680904e9c8084426b20d1e2294ba9cf2_f0440d31fa3d850bbc6f1938601d069f to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123680904e9c8084426b20d1e2294ba9cf2_f0440d31fa3d850bbc6f1938601d069f 2024-11-23T15:25:21,432 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123a0871cfa0de0437186aa44bfb3bc01b2_f0440d31fa3d850bbc6f1938601d069f to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123a0871cfa0de0437186aa44bfb3bc01b2_f0440d31fa3d850bbc6f1938601d069f 2024-11-23T15:25:21,433 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123a4ea646a5f5e4a42a00e2a82a37b2227_f0440d31fa3d850bbc6f1938601d069f to 
hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123a4ea646a5f5e4a42a00e2a82a37b2227_f0440d31fa3d850bbc6f1938601d069f 2024-11-23T15:25:21,435 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123c4ed605093ae4fd5bce25ece44891285_f0440d31fa3d850bbc6f1938601d069f to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123c4ed605093ae4fd5bce25ece44891285_f0440d31fa3d850bbc6f1938601d069f 2024-11-23T15:25:21,436 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123c5ea1b46eea9485cafa810f90a9ec9f0_f0440d31fa3d850bbc6f1938601d069f to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123c5ea1b46eea9485cafa810f90a9ec9f0_f0440d31fa3d850bbc6f1938601d069f 2024-11-23T15:25:21,437 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123cb9111ad30c142a58b7af081696d00f8_f0440d31fa3d850bbc6f1938601d069f to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123cb9111ad30c142a58b7af081696d00f8_f0440d31fa3d850bbc6f1938601d069f 2024-11-23T15:25:21,438 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123f797276b563547ffac0e128f712bec36_f0440d31fa3d850bbc6f1938601d069f to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123f797276b563547ffac0e128f712bec36_f0440d31fa3d850bbc6f1938601d069f 2024-11-23T15:25:21,439 DEBUG [PEWorker-1 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-11-23T15:25:21,441 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=63, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-23T15:25:21,444 WARN [PEWorker-1 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-11-23T15:25:21,446 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 
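Aside: every mob file archived above (under mobdir/.../6995762b1b921fda31b7b26a0c5785c3/A) shares the prefix d41d8cd98f00b204e9800998ecf8427e, which is the MD5 of an empty byte array and so consistent with this region's empty start key; reading the rest of the name as a yyyyMMdd date (20241123), a per-file hex id, and the encoded region name after the underscore is an assumption about HBase's MOB file naming, not something the log states. A small check of just the MD5 prefix:

import java.security.MessageDigest;

public class MobPrefixCheck {
    public static void main(String[] args) throws Exception {
        // MD5 of an empty byte array, hex-encoded.
        byte[] digest = MessageDigest.getInstance("MD5").digest(new byte[0]);
        StringBuilder hex = new StringBuilder();
        for (byte b : digest) {
            hex.append(String.format("%02x", b & 0xff));
        }
        // Prints d41d8cd98f00b204e9800998ecf8427e, the prefix of every archived mob file above.
        System.out.println(hex);
    }
}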
2024-11-23T15:25:21,447 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=63, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-23T15:25:21,447 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 2024-11-23T15:25:21,447 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732375521447"}]},"ts":"9223372036854775807"} 2024-11-23T15:25:21,449 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-11-23T15:25:21,449 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => f0440d31fa3d850bbc6f1938601d069f, NAME => 'TestAcidGuarantees,,1732375488170.f0440d31fa3d850bbc6f1938601d069f.', STARTKEY => '', ENDKEY => ''}] 2024-11-23T15:25:21,449 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 2024-11-23T15:25:21,449 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732375521449"}]},"ts":"9223372036854775807"} 2024-11-23T15:25:21,450 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-11-23T15:25:21,453 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(133): Finished pid=63, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-23T15:25:21,453 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=63, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 66 msec 2024-11-23T15:25:21,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-11-23T15:25:21,491 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 63 completed 2024-11-23T15:25:21,500 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testMobMixedAtomicity Thread=238 (was 239), OpenFileDescriptor=455 (was 455), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=276 (was 287), ProcessCount=11 (was 11), AvailableMemoryMB=3785 (was 3949) 2024-11-23T15:25:21,508 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testGetAtomicity Thread=238, OpenFileDescriptor=455, MaxFileDescriptor=1048576, SystemLoadAverage=276, ProcessCount=11, AvailableMemoryMB=3784 2024-11-23T15:25:21,509 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
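Aside: the "Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 59 completed" and "Operation: DELETE, ... procId: 63 completed" lines above are what the client sees after the usual Admin calls. A minimal sketch of that client-side sequence with the stock HBase 2.x API (not the test's own code; configuration and error handling omitted):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DropTableSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            TableName table = TableName.valueOf("TestAcidGuarantees");
            // A table must be disabled before it can be deleted; each call blocks until the
            // corresponding master procedure (DisableTableProcedure / DeleteTableProcedure
            // in the log above) reports completion.
            admin.disableTable(table);
            admin.deleteTable(table);
        }
    }
}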
2024-11-23T15:25:21,509 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-23T15:25:21,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] procedure2.ProcedureExecutor(1098): Stored pid=64, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-11-23T15:25:21,511 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=64, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-11-23T15:25:21,511 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:21,511 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 64 2024-11-23T15:25:21,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=64 2024-11-23T15:25:21,511 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=64, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-23T15:25:21,517 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742038_1214 (size=963) 2024-11-23T15:25:21,570 DEBUG [master/6a36843bf905:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region 7c4a395faaf6c8f523e4a2ccca6ed0d7 changed from -1.0 to 0.0, refreshing cache 2024-11-23T15:25:21,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=64 2024-11-23T15:25:21,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=64 2024-11-23T15:25:21,920 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 9d175c2ab5829c897c27b4bef55dd393, NAME => 'TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', 
{TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704 2024-11-23T15:25:21,925 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742039_1215 (size=53) 2024-11-23T15:25:22,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=64 2024-11-23T15:25:22,326 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T15:25:22,326 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 9d175c2ab5829c897c27b4bef55dd393, disabling compactions & flushes 2024-11-23T15:25:22,326 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393. 2024-11-23T15:25:22,326 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393. 2024-11-23T15:25:22,326 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393. after waiting 0 ms 2024-11-23T15:25:22,326 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393. 2024-11-23T15:25:22,326 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393. 
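Aside: the create request logged at 15:25:21,509 above carries the table attribute 'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE' plus three families A/B/C with VERSIONS => '1'. A hedged sketch of building such a descriptor with the standard 2.x builders (this is illustrative, not the test's setup code; the MEMSTORE_FLUSHSIZE override hinted at by the earlier TableDescriptorChecker warning is left out):

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class AdaptiveMemstoreTableSketch {
    public static void main(String[] args) {
        TableDescriptorBuilder builder =
            TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"))
                // Table-level metadata seen in the logged descriptor; selects the
                // ADAPTIVE in-memory compaction policy used by CompactingMemStore.
                .setValue("hbase.hregion.compacting.memstore.type", "ADAPTIVE");
        for (String family : new String[] {"A", "B", "C"}) {
            ColumnFamilyDescriptor cf = ColumnFamilyDescriptorBuilder
                .newBuilder(Bytes.toBytes(family))
                .setMaxVersions(1) // VERSIONS => '1' in the logged descriptor
                .build();
            builder.setColumnFamily(cf);
        }
        TableDescriptor desc = builder.build();
        // Passing desc to Admin.createTable(...) would issue the CreateTableProcedure seen above.
        System.out.println(desc);
    }
}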
2024-11-23T15:25:22,326 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 9d175c2ab5829c897c27b4bef55dd393: 2024-11-23T15:25:22,327 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=64, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-11-23T15:25:22,327 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1732375522327"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732375522327"}]},"ts":"1732375522327"} 2024-11-23T15:25:22,328 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-11-23T15:25:22,329 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=64, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-23T15:25:22,329 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732375522329"}]},"ts":"1732375522329"} 2024-11-23T15:25:22,330 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-11-23T15:25:22,336 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=65, ppid=64, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=9d175c2ab5829c897c27b4bef55dd393, ASSIGN}] 2024-11-23T15:25:22,336 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=65, ppid=64, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=9d175c2ab5829c897c27b4bef55dd393, ASSIGN 2024-11-23T15:25:22,337 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=65, ppid=64, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=9d175c2ab5829c897c27b4bef55dd393, ASSIGN; state=OFFLINE, location=6a36843bf905,33811,1732375456985; forceNewPlan=false, retain=false 2024-11-23T15:25:22,487 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=65 updating hbase:meta row=9d175c2ab5829c897c27b4bef55dd393, regionState=OPENING, regionLocation=6a36843bf905,33811,1732375456985 2024-11-23T15:25:22,489 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=66, ppid=65, state=RUNNABLE; OpenRegionProcedure 9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985}] 2024-11-23T15:25:22,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=64 2024-11-23T15:25:22,640 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:25:22,643 INFO [RS_OPEN_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393. 
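Aside: the hbase:meta row written by the Put at 15:25:22,327 above, "TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.", follows the region-name layout of table name, start key (empty here), region id (a creation timestamp), then a dot-delimited 32-character encoded name. A small parsing sketch of that layout (field names are illustrative; only the row string itself comes from the log, and the naive split below ignores start keys that contain delimiters):

public class RegionNameParseSketch {
    public static void main(String[] args) {
        String row = "TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.";
        // Layout: <table>,<startKey>,<regionId>.<encodedName>.
        int firstComma = row.indexOf(',');
        int secondComma = row.indexOf(',', firstComma + 1);
        int dot = row.indexOf('.', secondComma + 1);
        String table = row.substring(0, firstComma);
        String startKey = row.substring(firstComma + 1, secondComma); // "" for this single-region table
        String regionId = row.substring(secondComma + 1, dot);        // 1732375521509, the creation timestamp
        String encoded = row.substring(dot + 1, row.length() - 1);    // 9d175c2ab5829c897c27b4bef55dd393
        System.out.println(table + " | '" + startKey + "' | " + regionId + " | " + encoded);
    }
}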
2024-11-23T15:25:22,643 DEBUG [RS_OPEN_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(7285): Opening region: {ENCODED => 9d175c2ab5829c897c27b4bef55dd393, NAME => 'TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.', STARTKEY => '', ENDKEY => ''} 2024-11-23T15:25:22,644 DEBUG [RS_OPEN_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 9d175c2ab5829c897c27b4bef55dd393 2024-11-23T15:25:22,644 DEBUG [RS_OPEN_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T15:25:22,644 DEBUG [RS_OPEN_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(7327): checking encryption for 9d175c2ab5829c897c27b4bef55dd393 2024-11-23T15:25:22,644 DEBUG [RS_OPEN_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(7330): checking classloading for 9d175c2ab5829c897c27b4bef55dd393 2024-11-23T15:25:22,645 INFO [StoreOpener-9d175c2ab5829c897c27b4bef55dd393-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 9d175c2ab5829c897c27b4bef55dd393 2024-11-23T15:25:22,646 INFO [StoreOpener-9d175c2ab5829c897c27b4bef55dd393-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-23T15:25:22,647 INFO [StoreOpener-9d175c2ab5829c897c27b4bef55dd393-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 9d175c2ab5829c897c27b4bef55dd393 columnFamilyName A 2024-11-23T15:25:22,647 DEBUG [StoreOpener-9d175c2ab5829c897c27b4bef55dd393-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:22,647 INFO [StoreOpener-9d175c2ab5829c897c27b4bef55dd393-1 {}] regionserver.HStore(327): Store=9d175c2ab5829c897c27b4bef55dd393/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T15:25:22,647 INFO [StoreOpener-9d175c2ab5829c897c27b4bef55dd393-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 9d175c2ab5829c897c27b4bef55dd393 2024-11-23T15:25:22,648 INFO [StoreOpener-9d175c2ab5829c897c27b4bef55dd393-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-23T15:25:22,648 INFO [StoreOpener-9d175c2ab5829c897c27b4bef55dd393-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 9d175c2ab5829c897c27b4bef55dd393 columnFamilyName B 2024-11-23T15:25:22,648 DEBUG [StoreOpener-9d175c2ab5829c897c27b4bef55dd393-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:22,649 INFO [StoreOpener-9d175c2ab5829c897c27b4bef55dd393-1 {}] regionserver.HStore(327): Store=9d175c2ab5829c897c27b4bef55dd393/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T15:25:22,649 INFO [StoreOpener-9d175c2ab5829c897c27b4bef55dd393-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 9d175c2ab5829c897c27b4bef55dd393 2024-11-23T15:25:22,650 INFO [StoreOpener-9d175c2ab5829c897c27b4bef55dd393-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-23T15:25:22,650 INFO [StoreOpener-9d175c2ab5829c897c27b4bef55dd393-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 9d175c2ab5829c897c27b4bef55dd393 columnFamilyName C 2024-11-23T15:25:22,650 DEBUG [StoreOpener-9d175c2ab5829c897c27b4bef55dd393-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:22,650 INFO [StoreOpener-9d175c2ab5829c897c27b4bef55dd393-1 {}] regionserver.HStore(327): Store=9d175c2ab5829c897c27b4bef55dd393/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T15:25:22,650 INFO [RS_OPEN_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393. 2024-11-23T15:25:22,651 DEBUG [RS_OPEN_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393 2024-11-23T15:25:22,651 DEBUG [RS_OPEN_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393 2024-11-23T15:25:22,653 DEBUG [RS_OPEN_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-23T15:25:22,653 DEBUG [RS_OPEN_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(1085): writing seq id for 9d175c2ab5829c897c27b4bef55dd393 2024-11-23T15:25:22,655 DEBUG [RS_OPEN_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-23T15:25:22,656 INFO [RS_OPEN_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(1102): Opened 9d175c2ab5829c897c27b4bef55dd393; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=63851883, jitterRate=-0.04853279888629913}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-23T15:25:22,657 DEBUG [RS_OPEN_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(1001): Region open journal for 9d175c2ab5829c897c27b4bef55dd393: 2024-11-23T15:25:22,657 INFO [RS_OPEN_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393., pid=66, masterSystemTime=1732375522640 2024-11-23T15:25:22,659 DEBUG [RS_OPEN_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393. 2024-11-23T15:25:22,659 INFO [RS_OPEN_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393. 
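[Editor's note] The entries above trace the open path for region 9d175c2ab5829c897c27b4bef55dd393 of TestAcidGuarantees: each of the three column families A, B and C gets an HStore backed by a CompactingMemStore, no recovered edits are found, a recovered.edits/1.seqid marker is written, and the region opens at next sequenceid=2 before the master marks the assignment SUCCESS. For orientation only, here is a minimal client-side sketch of creating a table with the same three families through the standard HBase 2.x Admin API; the table and family names are taken from the log, while the class name, connection setup and everything else are illustrative and assume a reachable cluster configuration (hbase-site.xml) on the classpath rather than the test mini-cluster used here.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class CreateAcidTableSketch {
    public static void main(String[] args) throws Exception {
        // Picks up hbase-site.xml from the classpath; the test in the log uses
        // its own mini-cluster configuration instead.
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            TableName name = TableName.valueOf("TestAcidGuarantees");
            // Three column families, matching the A/B/C stores opened in the log.
            TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(name);
            for (String family : new String[] {"A", "B", "C"}) {
                builder.setColumnFamily(ColumnFamilyDescriptorBuilder.of(family));
            }
            admin.createTable(builder.build());
        }
    }
}
```

The CompactingMemStore, flush-policy and compaction settings reported in the log come from cluster and table configuration, not from anything in this sketch.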
2024-11-23T15:25:22,659 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=65 updating hbase:meta row=9d175c2ab5829c897c27b4bef55dd393, regionState=OPEN, openSeqNum=2, regionLocation=6a36843bf905,33811,1732375456985 2024-11-23T15:25:22,661 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=66, resume processing ppid=65 2024-11-23T15:25:22,661 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=66, ppid=65, state=SUCCESS; OpenRegionProcedure 9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 in 171 msec 2024-11-23T15:25:22,662 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=65, resume processing ppid=64 2024-11-23T15:25:22,662 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=65, ppid=64, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=9d175c2ab5829c897c27b4bef55dd393, ASSIGN in 326 msec 2024-11-23T15:25:22,663 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=64, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-23T15:25:22,663 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732375522663"}]},"ts":"1732375522663"} 2024-11-23T15:25:22,664 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-11-23T15:25:22,666 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=64, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-11-23T15:25:22,667 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=64, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.1570 sec 2024-11-23T15:25:23,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=64 2024-11-23T15:25:23,615 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 64 completed 2024-11-23T15:25:23,616 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x64dc42d9 to 127.0.0.1:62881 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@58341641 2024-11-23T15:25:23,620 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@17b6adc5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T15:25:23,621 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T15:25:23,623 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43586, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T15:25:23,624 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-23T15:25:23,624 INFO [RS-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57844, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-23T15:25:23,626 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3c1ac389 to 127.0.0.1:62881 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@44645c55 2024-11-23T15:25:23,629 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@669e1999, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T15:25:23,630 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x028e73c0 to 127.0.0.1:62881 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@64ee0130 2024-11-23T15:25:23,633 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@72aa9ee5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T15:25:23,634 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7c480dfb to 127.0.0.1:62881 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@683b64c3 2024-11-23T15:25:23,636 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4ec09297, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T15:25:23,637 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x34cb3991 to 127.0.0.1:62881 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7e55eb7 2024-11-23T15:25:23,640 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4dfb20f6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T15:25:23,641 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2e9ae050 to 127.0.0.1:62881 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3a703d2 2024-11-23T15:25:23,643 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@17cf7fc0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T15:25:23,644 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x14ed1e44 to 127.0.0.1:62881 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@78b04266 2024-11-23T15:25:23,647 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5886c0f2, compressor=null, tcpKeepAlive=true, 
tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T15:25:23,648 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x72537a47 to 127.0.0.1:62881 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@88aa519 2024-11-23T15:25:23,650 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@66e575aa, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T15:25:23,651 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x036642cb to 127.0.0.1:62881 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5e998dd3 2024-11-23T15:25:23,655 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@131ceb8f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T15:25:23,656 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3c299cfb to 127.0.0.1:62881 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2e4c79b8 2024-11-23T15:25:23,659 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5a78bf6d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T15:25:23,659 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x605827c9 to 127.0.0.1:62881 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2d1403c3 2024-11-23T15:25:23,662 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@328852db, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T15:25:23,667 DEBUG [hconnection-0x2fa6ce6a-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T15:25:23,667 DEBUG [hconnection-0x54710e53-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T15:25:23,667 DEBUG [hconnection-0x71eee384-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T15:25:23,667 DEBUG [hconnection-0x786575f7-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T15:25:23,668 DEBUG [hconnection-0x3cd2356b-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T15:25:23,668 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43592, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T15:25:23,668 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-23T15:25:23,668 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43598, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T15:25:23,668 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43596, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T15:25:23,668 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43612, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T15:25:23,668 DEBUG [hconnection-0x6a043d24-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T15:25:23,669 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43624, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T15:25:23,669 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43626, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T15:25:23,669 DEBUG [hconnection-0x666c232f-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T15:25:23,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] procedure2.ProcedureExecutor(1098): Stored pid=67, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=67, table=TestAcidGuarantees 2024-11-23T15:25:23,671 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43642, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T15:25:23,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=67 2024-11-23T15:25:23,671 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=67, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=67, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-23T15:25:23,672 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=67, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=67, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-23T15:25:23,672 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=68, ppid=67, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-23T15:25:23,675 DEBUG [hconnection-0x25cfb477-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T15:25:23,675 DEBUG [hconnection-0x16ded5f7-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T15:25:23,676 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43644, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), 
service=ClientService 2024-11-23T15:25:23,676 DEBUG [hconnection-0x210b1dab-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T15:25:23,677 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43646, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T15:25:23,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(8581): Flush requested on 9d175c2ab5829c897c27b4bef55dd393 2024-11-23T15:25:23,677 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 9d175c2ab5829c897c27b4bef55dd393 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-23T15:25:23,678 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43662, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T15:25:23,679 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9d175c2ab5829c897c27b4bef55dd393, store=A 2024-11-23T15:25:23,679 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:25:23,679 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9d175c2ab5829c897c27b4bef55dd393, store=B 2024-11-23T15:25:23,679 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:25:23,679 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9d175c2ab5829c897c27b4bef55dd393, store=C 2024-11-23T15:25:23,679 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:25:23,701 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:23,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43642 deadline: 1732375583698, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:23,703 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:23,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43626 deadline: 1732375583699, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:23,703 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:23,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43598 deadline: 1732375583700, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:23,704 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:23,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43624 deadline: 1732375583700, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:23,704 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:23,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43596 deadline: 1732375583701, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:23,708 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/A/a7b16659eeeb48699f98c718208e35ba is 50, key is test_row_0/A:col10/1732375523675/Put/seqid=0 2024-11-23T15:25:23,746 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742040_1216 (size=12001) 2024-11-23T15:25:23,748 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=14 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/A/a7b16659eeeb48699f98c718208e35ba 2024-11-23T15:25:23,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=67 2024-11-23T15:25:23,785 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/B/6f1b520ae3b24fefb63310ad64a7f53b is 50, key is test_row_0/B:col10/1732375523675/Put/seqid=0 2024-11-23T15:25:23,799 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742041_1217 (size=12001) 2024-11-23T15:25:23,799 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=14 (bloomFilter=true), 
to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/B/6f1b520ae3b24fefb63310ad64a7f53b 2024-11-23T15:25:23,803 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:23,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43642 deadline: 1732375583802, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:23,806 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:23,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43626 deadline: 1732375583805, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:23,807 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:23,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43598 deadline: 1732375583805, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:23,807 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:23,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43624 deadline: 1732375583805, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:23,807 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:23,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43596 deadline: 1732375583805, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:23,823 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:25:23,823 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=68 2024-11-23T15:25:23,824 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393. 2024-11-23T15:25:23,824 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393. as already flushing 2024-11-23T15:25:23,824 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393. 2024-11-23T15:25:23,824 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] handler.RSProcedureHandler(58): pid=68 java.io.IOException: Unable to complete flush {ENCODED => 9d175c2ab5829c897c27b4bef55dd393, NAME => 'TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
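[Editor's note] The entries above show three things interacting: a table flush driven from the master (FlushTableProcedure pid=67 with subprocedure pid=68, corresponding to a client flush request such as Admin.flush on TestAcidGuarantees), the region server's own MemStoreFlusher already flushing the region, and writer RPCs being rejected with RegionTooBusyException once the region exceeds its 512.0 K blocking memstore limit; the remote FlushRegionCallable for pid=68 then fails with "Unable to complete flush" simply because a flush is already in progress. Below is a minimal writer sketch that backs off and retries when a put is rejected under memstore pressure. The row key and column names are taken from the log; the retry loop, timings and class name are illustrative, and because the stock client applies its own retry machinery the server-side RegionTooBusyException may surface wrapped in another exception, so the sketch catches IOException broadly rather than assuming a specific type.

```java
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class RetryingWriterSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            // Same row/column shape as the cells flushed in the log
            // (key is test_row_0/A:col10).
            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
            int maxAttempts = 5;
            for (int attempt = 1; attempt <= maxAttempts; attempt++) {
                try {
                    table.put(put);
                    break;
                } catch (IOException e) {
                    // The log shows Mutate calls rejected while the memstore is over
                    // its blocking limit; give the flush time to drain, then retry.
                    if (attempt == maxAttempts) {
                        throw e;
                    }
                    Thread.sleep(100L * attempt);
                }
            }
        }
    }
}
```

In the test run itself no such client-side loop is needed: the rejections are transient, and once the flush completes (as the later "Finished flush" entry shows) the blocked writers go through on the client's built-in retries.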
2024-11-23T15:25:23,824 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=68 java.io.IOException: Unable to complete flush {ENCODED => 9d175c2ab5829c897c27b4bef55dd393, NAME => 'TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:25:23,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=68 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9d175c2ab5829c897c27b4bef55dd393, NAME => 'TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9d175c2ab5829c897c27b4bef55dd393, NAME => 'TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:25:23,838 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/C/155d8df9687249f580bae119913a125c is 50, key is test_row_0/C:col10/1732375523675/Put/seqid=0 2024-11-23T15:25:23,854 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742042_1218 (size=12001) 2024-11-23T15:25:23,856 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=14 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/C/155d8df9687249f580bae119913a125c 2024-11-23T15:25:23,865 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/A/a7b16659eeeb48699f98c718208e35ba as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/A/a7b16659eeeb48699f98c718208e35ba 2024-11-23T15:25:23,873 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/A/a7b16659eeeb48699f98c718208e35ba, entries=150, sequenceid=14, filesize=11.7 K 2024-11-23T15:25:23,876 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/B/6f1b520ae3b24fefb63310ad64a7f53b as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/B/6f1b520ae3b24fefb63310ad64a7f53b 2024-11-23T15:25:23,888 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/B/6f1b520ae3b24fefb63310ad64a7f53b, entries=150, sequenceid=14, filesize=11.7 K 2024-11-23T15:25:23,890 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/C/155d8df9687249f580bae119913a125c as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/C/155d8df9687249f580bae119913a125c 2024-11-23T15:25:23,897 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/C/155d8df9687249f580bae119913a125c, entries=150, sequenceid=14, filesize=11.7 K 2024-11-23T15:25:23,898 
INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 9d175c2ab5829c897c27b4bef55dd393 in 221ms, sequenceid=14, compaction requested=false 2024-11-23T15:25:23,898 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 9d175c2ab5829c897c27b4bef55dd393: 2024-11-23T15:25:23,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=67 2024-11-23T15:25:23,977 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:25:23,977 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=68 2024-11-23T15:25:23,977 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393. 2024-11-23T15:25:23,978 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegion(2837): Flushing 9d175c2ab5829c897c27b4bef55dd393 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-23T15:25:23,978 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9d175c2ab5829c897c27b4bef55dd393, store=A 2024-11-23T15:25:23,978 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:25:23,978 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9d175c2ab5829c897c27b4bef55dd393, store=B 2024-11-23T15:25:23,978 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:25:23,978 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9d175c2ab5829c897c27b4bef55dd393, store=C 2024-11-23T15:25:23,978 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:25:23,983 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/A/7af5775a22d641a49242e2358234504a is 50, key is test_row_0/A:col10/1732375523693/Put/seqid=0 2024-11-23T15:25:23,988 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742043_1219 (size=12001) 2024-11-23T15:25:24,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(8581): Flush requested on 9d175c2ab5829c897c27b4bef55dd393 2024-11-23T15:25:24,006 DEBUG [MemStoreFlusher.0 {}] 
regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393. as already flushing 2024-11-23T15:25:24,015 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:24,015 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:24,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43596 deadline: 1732375584013, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:24,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43626 deadline: 1732375584013, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:24,017 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:24,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43624 deadline: 1732375584015, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:24,018 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:24,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43642 deadline: 1732375584015, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:24,018 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:24,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43598 deadline: 1732375584015, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:24,118 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:24,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43596 deadline: 1732375584117, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:24,119 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:24,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43626 deadline: 1732375584117, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:24,120 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:24,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43624 deadline: 1732375584118, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:24,121 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:24,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43598 deadline: 1732375584119, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:24,121 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:24,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43642 deadline: 1732375584119, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:24,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=67 2024-11-23T15:25:24,322 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:24,322 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:24,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43596 deadline: 1732375584320, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:24,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43626 deadline: 1732375584320, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:24,323 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:24,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43624 deadline: 1732375584322, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:24,324 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:24,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43598 deadline: 1732375584322, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:24,326 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:24,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43642 deadline: 1732375584324, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:24,389 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=37 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/A/7af5775a22d641a49242e2358234504a 2024-11-23T15:25:24,398 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/B/6865cac2ffb64975a15a7da0d8e17daa is 50, key is test_row_0/B:col10/1732375523693/Put/seqid=0 2024-11-23T15:25:24,403 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742044_1220 (size=12001) 2024-11-23T15:25:24,626 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:24,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43598 deadline: 1732375584625, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:24,626 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:24,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43626 deadline: 1732375584625, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:24,627 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:24,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43596 deadline: 1732375584625, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:24,627 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:24,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43624 deadline: 1732375584626, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:24,630 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:24,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43642 deadline: 1732375584629, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:24,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=67 2024-11-23T15:25:24,803 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=37 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/B/6865cac2ffb64975a15a7da0d8e17daa 2024-11-23T15:25:24,811 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/C/33c7f76f0e7546f9a3ac1433e8904f27 is 50, key is test_row_0/C:col10/1732375523693/Put/seqid=0 2024-11-23T15:25:24,829 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742045_1221 (size=12001) 2024-11-23T15:25:25,129 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:25,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43626 deadline: 1732375585128, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:25,130 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:25,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43596 deadline: 1732375585129, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:25,130 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:25,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43598 deadline: 1732375585129, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:25,132 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:25,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43624 deadline: 1732375585132, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:25,133 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:25,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43642 deadline: 1732375585132, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:25,230 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=37 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/C/33c7f76f0e7546f9a3ac1433e8904f27 2024-11-23T15:25:25,235 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/A/7af5775a22d641a49242e2358234504a as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/A/7af5775a22d641a49242e2358234504a 2024-11-23T15:25:25,240 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/A/7af5775a22d641a49242e2358234504a, entries=150, sequenceid=37, filesize=11.7 K 2024-11-23T15:25:25,241 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/B/6865cac2ffb64975a15a7da0d8e17daa as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/B/6865cac2ffb64975a15a7da0d8e17daa 2024-11-23T15:25:25,245 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/B/6865cac2ffb64975a15a7da0d8e17daa, entries=150, sequenceid=37, filesize=11.7 K 2024-11-23T15:25:25,246 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/C/33c7f76f0e7546f9a3ac1433e8904f27 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/C/33c7f76f0e7546f9a3ac1433e8904f27 2024-11-23T15:25:25,251 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/C/33c7f76f0e7546f9a3ac1433e8904f27, entries=150, sequenceid=37, filesize=11.7 K 2024-11-23T15:25:25,252 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=73.80 KB/75570 for 9d175c2ab5829c897c27b4bef55dd393 in 1274ms, sequenceid=37, compaction requested=false 2024-11-23T15:25:25,252 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegion(2538): Flush status journal for 9d175c2ab5829c897c27b4bef55dd393: 2024-11-23T15:25:25,252 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393. 2024-11-23T15:25:25,252 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=68 2024-11-23T15:25:25,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4106): Remote procedure done, pid=68 2024-11-23T15:25:25,254 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=68, resume processing ppid=67 2024-11-23T15:25:25,254 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=68, ppid=67, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.5810 sec 2024-11-23T15:25:25,256 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=67, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=67, table=TestAcidGuarantees in 1.5870 sec 2024-11-23T15:25:25,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=67 2024-11-23T15:25:25,776 INFO [Thread-1019 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 67 completed 2024-11-23T15:25:25,777 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-23T15:25:25,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] procedure2.ProcedureExecutor(1098): Stored pid=69, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=69, table=TestAcidGuarantees 2024-11-23T15:25:25,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=69 2024-11-23T15:25:25,778 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=69, state=RUNNABLE:FLUSH_TABLE_PREPARE, 
locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=69, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-23T15:25:25,779 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=69, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=69, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-23T15:25:25,779 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=70, ppid=69, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-23T15:25:25,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=69 2024-11-23T15:25:25,931 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:25:25,931 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=70 2024-11-23T15:25:25,931 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393. 2024-11-23T15:25:25,932 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegion(2837): Flushing 9d175c2ab5829c897c27b4bef55dd393 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-11-23T15:25:25,932 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9d175c2ab5829c897c27b4bef55dd393, store=A 2024-11-23T15:25:25,932 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:25:25,932 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9d175c2ab5829c897c27b4bef55dd393, store=B 2024-11-23T15:25:25,932 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:25:25,932 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9d175c2ab5829c897c27b4bef55dd393, store=C 2024-11-23T15:25:25,932 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:25:25,937 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/A/31efe6285ea74ecc9b9078f69e65c98b is 50, key is test_row_0/A:col10/1732375524014/Put/seqid=0 2024-11-23T15:25:25,943 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742046_1222 (size=12001) 
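The repeated RegionTooBusyException entries above come from HRegion.checkResources rejecting writes while the region's memstore is over its blocking limit (reported here as 512.0 K) and a flush is already in progress ("NOT flushing ... as already flushing"). Below is a minimal client-side sketch of what those rejected callers might do, assuming a standard HBase Java client and reusing the table, row, family, and qualifier names visible in this log; the outer retry loop, attempt count, and backoff values are illustrative assumptions, and the stock HBase client generally retries this exception on its own, so an explicit loop like this is not required in practice.

// Illustrative sketch only; not part of the test log above.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class RegionTooBusyRetryExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      // Row, family, and qualifier taken from the log ("key is test_row_0/A:col10/...").
      Put put = new Put(Bytes.toBytes("test_row_0"))
          .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 100;                  // assumed starting backoff
      for (int attempt = 0; attempt < 5; attempt++) {
        try {
          table.put(put);                    // may be rejected while the region's memstore is blocked
          break;
        } catch (RegionTooBusyException e) { // "Over memstore limit=512.0 K" as seen in the log;
                                             // depending on client retry settings it may instead
                                             // surface wrapped in a RetriesExhaustedException
          Thread.sleep(backoffMs);           // wait for the in-flight flush to drain the memstore
          backoffMs *= 2;                    // simple exponential backoff (assumption)
        }
      }
    }
  }
}

In the log itself, the bursts of RegionTooBusyException line up with windows where the region is already flushing and incoming mutations keep the memstore above its blocking limit; once each flush commits its HFiles and drains the memstore, the mutations start succeeding again until the next flush cycle.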
2024-11-23T15:25:26,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=69 2024-11-23T15:25:26,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(8581): Flush requested on 9d175c2ab5829c897c27b4bef55dd393 2024-11-23T15:25:26,133 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393. as already flushing 2024-11-23T15:25:26,151 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:26,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43596 deadline: 1732375586148, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:26,152 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:26,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43598 deadline: 1732375586149, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:26,153 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:26,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43626 deadline: 1732375586150, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:26,153 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:26,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43624 deadline: 1732375586150, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:26,154 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:26,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43642 deadline: 1732375586151, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:26,254 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:26,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43596 deadline: 1732375586253, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:26,254 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:26,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43598 deadline: 1732375586253, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:26,255 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:26,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43626 deadline: 1732375586254, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:26,256 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:26,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43624 deadline: 1732375586254, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:26,257 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:26,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43642 deadline: 1732375586255, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:26,344 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=51 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/A/31efe6285ea74ecc9b9078f69e65c98b 2024-11-23T15:25:26,351 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/B/94ab22e6072547788dec48f4df2816ec is 50, key is test_row_0/B:col10/1732375524014/Put/seqid=0 2024-11-23T15:25:26,356 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742047_1223 (size=12001) 2024-11-23T15:25:26,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=69 2024-11-23T15:25:26,457 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:26,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43596 deadline: 1732375586455, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:26,458 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:26,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43598 deadline: 1732375586455, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:26,458 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:26,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43626 deadline: 1732375586457, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:26,460 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:26,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43624 deadline: 1732375586458, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:26,461 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:26,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43642 deadline: 1732375586459, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:26,710 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-23T15:25:26,756 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=51 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/B/94ab22e6072547788dec48f4df2816ec 2024-11-23T15:25:26,762 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:26,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43596 deadline: 1732375586761, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:26,763 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:26,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43626 deadline: 1732375586761, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:26,763 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:26,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43598 deadline: 1732375586761, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:26,764 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:26,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43624 deadline: 1732375586762, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:26,766 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:26,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43642 deadline: 1732375586763, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:26,768 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/C/61b879b52b574ca79e22eae226faee54 is 50, key is test_row_0/C:col10/1732375524014/Put/seqid=0 2024-11-23T15:25:26,781 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742048_1224 (size=12001) 2024-11-23T15:25:26,782 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=51 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/C/61b879b52b574ca79e22eae226faee54 2024-11-23T15:25:26,787 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/A/31efe6285ea74ecc9b9078f69e65c98b as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/A/31efe6285ea74ecc9b9078f69e65c98b 2024-11-23T15:25:26,791 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/A/31efe6285ea74ecc9b9078f69e65c98b, entries=150, sequenceid=51, filesize=11.7 K 2024-11-23T15:25:26,793 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/B/94ab22e6072547788dec48f4df2816ec as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/B/94ab22e6072547788dec48f4df2816ec 2024-11-23T15:25:26,798 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/B/94ab22e6072547788dec48f4df2816ec, entries=150, sequenceid=51, filesize=11.7 K 2024-11-23T15:25:26,799 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/C/61b879b52b574ca79e22eae226faee54 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/C/61b879b52b574ca79e22eae226faee54 2024-11-23T15:25:26,803 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/C/61b879b52b574ca79e22eae226faee54, entries=150, sequenceid=51, filesize=11.7 K 2024-11-23T15:25:26,805 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for 9d175c2ab5829c897c27b4bef55dd393 in 874ms, sequenceid=51, compaction requested=true 2024-11-23T15:25:26,805 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegion(2538): Flush status journal for 9d175c2ab5829c897c27b4bef55dd393: 2024-11-23T15:25:26,805 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393. 
2024-11-23T15:25:26,805 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=70 2024-11-23T15:25:26,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4106): Remote procedure done, pid=70 2024-11-23T15:25:26,808 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=70, resume processing ppid=69 2024-11-23T15:25:26,808 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=70, ppid=69, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.0270 sec 2024-11-23T15:25:26,810 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=69, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=69, table=TestAcidGuarantees in 1.0320 sec 2024-11-23T15:25:26,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=69 2024-11-23T15:25:26,881 INFO [Thread-1019 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 69 completed 2024-11-23T15:25:26,883 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-23T15:25:26,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] procedure2.ProcedureExecutor(1098): Stored pid=71, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=71, table=TestAcidGuarantees 2024-11-23T15:25:26,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-11-23T15:25:26,886 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=71, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=71, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-23T15:25:26,887 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=71, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=71, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-23T15:25:26,887 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=72, ppid=71, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-23T15:25:26,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-11-23T15:25:27,039 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:25:27,039 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-11-23T15:25:27,039 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393. 
2024-11-23T15:25:27,039 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2837): Flushing 9d175c2ab5829c897c27b4bef55dd393 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-11-23T15:25:27,040 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9d175c2ab5829c897c27b4bef55dd393, store=A 2024-11-23T15:25:27,040 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:25:27,040 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9d175c2ab5829c897c27b4bef55dd393, store=B 2024-11-23T15:25:27,040 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:25:27,040 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9d175c2ab5829c897c27b4bef55dd393, store=C 2024-11-23T15:25:27,040 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:25:27,044 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/A/f5cf535ee07643f193f22c3558762e99 is 50, key is test_row_0/A:col10/1732375526141/Put/seqid=0 2024-11-23T15:25:27,048 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742049_1225 (size=12001) 2024-11-23T15:25:27,049 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=73 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/A/f5cf535ee07643f193f22c3558762e99 2024-11-23T15:25:27,057 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/B/1280dfa1fbf94cefb0b5156751c39f72 is 50, key is test_row_0/B:col10/1732375526141/Put/seqid=0 2024-11-23T15:25:27,069 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742050_1226 (size=12001) 2024-11-23T15:25:27,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-11-23T15:25:27,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(8581): Flush requested on 9d175c2ab5829c897c27b4bef55dd393 2024-11-23T15:25:27,269 DEBUG [MemStoreFlusher.0 {}] 
regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393. as already flushing 2024-11-23T15:25:27,277 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:27,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43626 deadline: 1732375587275, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:27,278 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:27,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43596 deadline: 1732375587275, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:27,278 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:27,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43598 deadline: 1732375587276, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:27,279 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:27,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43624 deadline: 1732375587277, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:27,279 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:27,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43642 deadline: 1732375587277, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:27,380 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:27,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43626 deadline: 1732375587378, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:27,380 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:27,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43596 deadline: 1732375587379, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:27,381 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:27,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43598 deadline: 1732375587379, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:27,381 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:27,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43624 deadline: 1732375587380, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:27,382 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:27,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43642 deadline: 1732375587380, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:27,470 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=73 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/B/1280dfa1fbf94cefb0b5156751c39f72 2024-11-23T15:25:27,478 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/C/636abcafa81d4c3389223cc8ec33b52a is 50, key is test_row_0/C:col10/1732375526141/Put/seqid=0 2024-11-23T15:25:27,483 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742051_1227 (size=12001) 2024-11-23T15:25:27,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-11-23T15:25:27,582 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:27,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43596 deadline: 1732375587581, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:27,583 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:27,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43626 deadline: 1732375587582, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:27,583 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:27,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43598 deadline: 1732375587582, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:27,584 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:27,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43624 deadline: 1732375587583, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:27,586 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:27,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43642 deadline: 1732375587584, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:27,884 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=73 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/C/636abcafa81d4c3389223cc8ec33b52a 2024-11-23T15:25:27,886 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:27,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43596 deadline: 1732375587884, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:27,887 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:27,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43598 deadline: 1732375587885, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:27,889 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:27,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43626 deadline: 1732375587886, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:27,889 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:27,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43624 deadline: 1732375587888, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:27,889 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/A/f5cf535ee07643f193f22c3558762e99 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/A/f5cf535ee07643f193f22c3558762e99 2024-11-23T15:25:27,890 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:27,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43642 deadline: 1732375587888, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:27,893 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/A/f5cf535ee07643f193f22c3558762e99, entries=150, sequenceid=73, filesize=11.7 K 2024-11-23T15:25:27,894 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/B/1280dfa1fbf94cefb0b5156751c39f72 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/B/1280dfa1fbf94cefb0b5156751c39f72 2024-11-23T15:25:27,898 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/B/1280dfa1fbf94cefb0b5156751c39f72, entries=150, sequenceid=73, filesize=11.7 K 2024-11-23T15:25:27,899 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/C/636abcafa81d4c3389223cc8ec33b52a as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/C/636abcafa81d4c3389223cc8ec33b52a 2024-11-23T15:25:27,902 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/C/636abcafa81d4c3389223cc8ec33b52a, entries=150, sequenceid=73, filesize=11.7 K 2024-11-23T15:25:27,903 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=80.51 KB/82440 for 9d175c2ab5829c897c27b4bef55dd393 in 864ms, sequenceid=73, 
compaction requested=true 2024-11-23T15:25:27,903 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2538): Flush status journal for 9d175c2ab5829c897c27b4bef55dd393: 2024-11-23T15:25:27,903 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393. 2024-11-23T15:25:27,903 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=72 2024-11-23T15:25:27,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4106): Remote procedure done, pid=72 2024-11-23T15:25:27,906 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=72, resume processing ppid=71 2024-11-23T15:25:27,906 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=72, ppid=71, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.0170 sec 2024-11-23T15:25:27,907 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=71, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=71, table=TestAcidGuarantees in 1.0230 sec 2024-11-23T15:25:27,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-11-23T15:25:27,988 INFO [Thread-1019 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 71 completed 2024-11-23T15:25:27,990 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-23T15:25:27,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] procedure2.ProcedureExecutor(1098): Stored pid=73, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=73, table=TestAcidGuarantees 2024-11-23T15:25:27,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-11-23T15:25:27,991 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=73, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=73, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-23T15:25:27,992 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=73, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=73, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-23T15:25:27,992 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=74, ppid=73, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-23T15:25:28,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-11-23T15:25:28,144 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:25:28,144 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote 
procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=74 2024-11-23T15:25:28,144 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393. 2024-11-23T15:25:28,145 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2837): Flushing 9d175c2ab5829c897c27b4bef55dd393 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-11-23T15:25:28,145 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9d175c2ab5829c897c27b4bef55dd393, store=A 2024-11-23T15:25:28,145 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:25:28,145 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9d175c2ab5829c897c27b4bef55dd393, store=B 2024-11-23T15:25:28,145 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:25:28,145 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9d175c2ab5829c897c27b4bef55dd393, store=C 2024-11-23T15:25:28,145 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:25:28,150 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/A/817b32710d2f4fd1b3e150b2acbd385b is 50, key is test_row_0/A:col10/1732375527276/Put/seqid=0 2024-11-23T15:25:28,154 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742052_1228 (size=12001) 2024-11-23T15:25:28,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-11-23T15:25:28,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(8581): Flush requested on 9d175c2ab5829c897c27b4bef55dd393 2024-11-23T15:25:28,390 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393. as already flushing 2024-11-23T15:25:28,408 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:28,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43626 deadline: 1732375588405, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:28,410 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:28,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43624 deadline: 1732375588406, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:28,410 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:28,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43642 deadline: 1732375588407, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:28,411 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:28,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43596 deadline: 1732375588408, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:28,411 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:28,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43598 deadline: 1732375588408, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:28,510 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:28,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43626 deadline: 1732375588509, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:28,513 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:28,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43624 deadline: 1732375588511, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:28,513 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:28,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43642 deadline: 1732375588511, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:28,513 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:28,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43596 deadline: 1732375588512, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:28,514 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:28,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43598 deadline: 1732375588512, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:28,555 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=88 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/A/817b32710d2f4fd1b3e150b2acbd385b 2024-11-23T15:25:28,563 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/B/3c25d53b551b4d3f8f15281f77ac5470 is 50, key is test_row_0/B:col10/1732375527276/Put/seqid=0 2024-11-23T15:25:28,567 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742053_1229 (size=12001) 2024-11-23T15:25:28,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-11-23T15:25:28,713 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:28,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43626 deadline: 1732375588712, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:28,716 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:28,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43596 deadline: 1732375588714, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:28,717 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:28,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43624 deadline: 1732375588715, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:28,717 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:28,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43642 deadline: 1732375588716, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:28,722 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:28,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43598 deadline: 1732375588720, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:28,968 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=88 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/B/3c25d53b551b4d3f8f15281f77ac5470 2024-11-23T15:25:28,978 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/C/cb87a155e37f47aa8508ee4c7003df8b is 50, key is test_row_0/C:col10/1732375527276/Put/seqid=0 2024-11-23T15:25:28,982 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742054_1230 (size=12001) 2024-11-23T15:25:29,018 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:29,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43626 deadline: 1732375589017, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:29,018 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:29,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43624 deadline: 1732375589018, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:29,021 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:29,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43596 deadline: 1732375589019, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:29,022 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:29,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43642 deadline: 1732375589020, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:29,024 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:29,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43598 deadline: 1732375589023, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:29,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-11-23T15:25:29,384 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=88 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/C/cb87a155e37f47aa8508ee4c7003df8b 2024-11-23T15:25:29,389 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/A/817b32710d2f4fd1b3e150b2acbd385b as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/A/817b32710d2f4fd1b3e150b2acbd385b 2024-11-23T15:25:29,393 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/A/817b32710d2f4fd1b3e150b2acbd385b, entries=150, sequenceid=88, filesize=11.7 K 2024-11-23T15:25:29,393 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/B/3c25d53b551b4d3f8f15281f77ac5470 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/B/3c25d53b551b4d3f8f15281f77ac5470 2024-11-23T15:25:29,397 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/B/3c25d53b551b4d3f8f15281f77ac5470, entries=150, sequenceid=88, filesize=11.7 K 2024-11-23T15:25:29,398 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/C/cb87a155e37f47aa8508ee4c7003df8b as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/C/cb87a155e37f47aa8508ee4c7003df8b 2024-11-23T15:25:29,403 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/C/cb87a155e37f47aa8508ee4c7003df8b, entries=150, sequenceid=88, filesize=11.7 K 2024-11-23T15:25:29,404 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=127.47 KB/130530 for 9d175c2ab5829c897c27b4bef55dd393 in 1259ms, sequenceid=88, compaction requested=true 2024-11-23T15:25:29,404 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2538): Flush status journal for 9d175c2ab5829c897c27b4bef55dd393: 2024-11-23T15:25:29,404 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393. 
2024-11-23T15:25:29,404 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=74 2024-11-23T15:25:29,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4106): Remote procedure done, pid=74 2024-11-23T15:25:29,406 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=74, resume processing ppid=73 2024-11-23T15:25:29,406 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=74, ppid=73, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4130 sec 2024-11-23T15:25:29,408 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=73, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=73, table=TestAcidGuarantees in 1.4170 sec 2024-11-23T15:25:29,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(8581): Flush requested on 9d175c2ab5829c897c27b4bef55dd393 2024-11-23T15:25:29,522 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 9d175c2ab5829c897c27b4bef55dd393 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-23T15:25:29,523 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9d175c2ab5829c897c27b4bef55dd393, store=A 2024-11-23T15:25:29,523 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:25:29,523 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9d175c2ab5829c897c27b4bef55dd393, store=B 2024-11-23T15:25:29,523 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:25:29,523 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9d175c2ab5829c897c27b4bef55dd393, store=C 2024-11-23T15:25:29,523 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:25:29,528 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/A/5afe36eacc24481ab7cc3180c5615858 is 50, key is test_row_0/A:col10/1732375529522/Put/seqid=0 2024-11-23T15:25:29,532 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742055_1231 (size=14341) 2024-11-23T15:25:29,533 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:29,533 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:29,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43626 deadline: 1732375589529, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:29,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43598 deadline: 1732375589529, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:29,533 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:29,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43624 deadline: 1732375589530, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:29,533 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:29,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43596 deadline: 1732375589531, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:29,534 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:29,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43642 deadline: 1732375589533, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:29,636 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:29,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43624 deadline: 1732375589634, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:29,636 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:29,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43596 deadline: 1732375589634, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:29,636 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:29,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43642 deadline: 1732375589635, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:29,639 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:29,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43626 deadline: 1732375589636, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:29,639 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:29,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43598 deadline: 1732375589637, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:29,839 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:29,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43596 deadline: 1732375589837, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:29,840 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:29,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43624 deadline: 1732375589838, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:29,840 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:29,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43642 deadline: 1732375589838, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:29,842 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:29,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43626 deadline: 1732375589840, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:29,842 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:29,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43598 deadline: 1732375589841, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:29,933 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=112 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/A/5afe36eacc24481ab7cc3180c5615858 2024-11-23T15:25:29,942 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/B/a460fe83f4984283861e4dd2fb635672 is 50, key is test_row_0/B:col10/1732375529522/Put/seqid=0 2024-11-23T15:25:29,949 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742056_1232 (size=12001) 2024-11-23T15:25:30,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-11-23T15:25:30,095 INFO [Thread-1019 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 73 completed 2024-11-23T15:25:30,097 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-23T15:25:30,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] procedure2.ProcedureExecutor(1098): Stored pid=75, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=75, table=TestAcidGuarantees 2024-11-23T15:25:30,098 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=75, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; 
org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=75, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-23T15:25:30,099 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=75, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=75, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-23T15:25:30,099 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=76, ppid=75, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-23T15:25:30,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-11-23T15:25:30,144 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:30,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43642 deadline: 1732375590142, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:30,144 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:30,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43596 deadline: 1732375590142, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:30,145 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:30,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43626 deadline: 1732375590143, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:30,145 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:30,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43624 deadline: 1732375590143, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:30,147 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:30,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43598 deadline: 1732375590145, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:30,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-11-23T15:25:30,250 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:25:30,250 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-11-23T15:25:30,251 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393. 2024-11-23T15:25:30,251 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393. as already flushing 2024-11-23T15:25:30,251 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393. 2024-11-23T15:25:30,251 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] handler.RSProcedureHandler(58): pid=76 java.io.IOException: Unable to complete flush {ENCODED => 9d175c2ab5829c897c27b4bef55dd393, NAME => 'TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
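
The entries above show HRegion.checkResources rejecting Mutate calls with RegionTooBusyException once the region's memstore passes its blocking limit (512.0 K in this run), while the master's FlushRegionProcedure (pid=76) keeps bouncing with "NOT flushing ... as already flushing" until the in-flight flush finishes. Below is a minimal client-side sketch of how a writer might back off when such rejections reach it; the class name, retry count, and backoff values are illustrative, the table/family/row names are taken from the log, and note that the stock HBase client already retries retriable exceptions internally, so depending on retry settings the exception may instead surface wrapped in a RetriesExhaustedException.

```java
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BackoffPutExample {            // illustrative class name
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));

      long backoffMs = 100;                 // illustrative starting backoff
      for (int attempt = 1; attempt <= 5; attempt++) {
        try {
          table.put(put);                   // may be rejected while the region is over its memstore limit
          break;
        } catch (RegionTooBusyException busy) {
          // Region is blocking writes until the memstore flushes; wait and retry.
          Thread.sleep(backoffMs);
          backoffMs *= 2;
        }
      }
    }
  }
}
```
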
2024-11-23T15:25:30,251 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=76 java.io.IOException: Unable to complete flush {ENCODED => 9d175c2ab5829c897c27b4bef55dd393, NAME => 'TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:25:30,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=76 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9d175c2ab5829c897c27b4bef55dd393, NAME => 'TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9d175c2ab5829c897c27b4bef55dd393, NAME => 'TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:25:30,350 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=112 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/B/a460fe83f4984283861e4dd2fb635672 2024-11-23T15:25:30,359 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/C/c5d0a09527d0467188e2459a2245dd24 is 50, key is test_row_0/C:col10/1732375529522/Put/seqid=0 2024-11-23T15:25:30,364 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742057_1233 (size=12001) 2024-11-23T15:25:30,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-11-23T15:25:30,403 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:25:30,404 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-11-23T15:25:30,404 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393. 2024-11-23T15:25:30,404 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393. as already flushing 2024-11-23T15:25:30,404 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393. 2024-11-23T15:25:30,404 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] handler.RSProcedureHandler(58): pid=76 java.io.IOException: Unable to complete flush {ENCODED => 9d175c2ab5829c897c27b4bef55dd393, NAME => 'TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:25:30,404 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=76 java.io.IOException: Unable to complete flush {ENCODED => 9d175c2ab5829c897c27b4bef55dd393, NAME => 'TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:25:30,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=76 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9d175c2ab5829c897c27b4bef55dd393, NAME => 'TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9d175c2ab5829c897c27b4bef55dd393, NAME => 'TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:25:30,556 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:25:30,557 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-11-23T15:25:30,557 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393. 2024-11-23T15:25:30,557 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393. as already flushing 2024-11-23T15:25:30,557 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393. 2024-11-23T15:25:30,557 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] handler.RSProcedureHandler(58): pid=76 java.io.IOException: Unable to complete flush {ENCODED => 9d175c2ab5829c897c27b4bef55dd393, NAME => 'TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:25:30,557 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=76 java.io.IOException: Unable to complete flush {ENCODED => 9d175c2ab5829c897c27b4bef55dd393, NAME => 'TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:25:30,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=76 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9d175c2ab5829c897c27b4bef55dd393, NAME => 'TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9d175c2ab5829c897c27b4bef55dd393, NAME => 'TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:25:30,648 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:30,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43642 deadline: 1732375590647, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:30,649 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:30,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43626 deadline: 1732375590647, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:30,650 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:30,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43624 deadline: 1732375590649, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:30,651 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:30,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43596 deadline: 1732375590649, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:30,653 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:30,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43598 deadline: 1732375590651, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:30,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-11-23T15:25:30,709 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:25:30,710 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-11-23T15:25:30,710 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393. 2024-11-23T15:25:30,710 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393. as already flushing 2024-11-23T15:25:30,710 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393. 2024-11-23T15:25:30,710 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] handler.RSProcedureHandler(58): pid=76 java.io.IOException: Unable to complete flush {ENCODED => 9d175c2ab5829c897c27b4bef55dd393, NAME => 'TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
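
The 512.0 K figure in these "Over memstore limit" messages is the region's blocking memstore size, which HBase derives from hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier; the test evidently shrinks the flush size far below the 128 MB default so that flushes and write blocking trigger quickly. The configuration sketch below would produce the same 512 KB blocking limit; the exact values this test run used are an assumption, and a 128 KB flush size with the default multiplier of 4 is just one combination that matches.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class SmallMemstoreConfig {          // illustrative class name
  public static Configuration smallMemstoreConf() {
    Configuration conf = HBaseConfiguration.create();
    // Flush each memstore once it reaches 128 KB (the default is 128 MB).
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
    // Block new writes once the memstore reaches flush.size * multiplier,
    // i.e. 512 KB here -- the "Over memstore limit=512.0 K" seen in the log.
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
    return conf;
  }
}
```
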
2024-11-23T15:25:30,711 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=76 java.io.IOException: Unable to complete flush {ENCODED => 9d175c2ab5829c897c27b4bef55dd393, NAME => 'TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:25:30,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=76 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9d175c2ab5829c897c27b4bef55dd393, NAME => 'TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9d175c2ab5829c897c27b4bef55dd393, NAME => 'TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:25:30,764 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=112 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/C/c5d0a09527d0467188e2459a2245dd24 2024-11-23T15:25:30,769 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/A/5afe36eacc24481ab7cc3180c5615858 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/A/5afe36eacc24481ab7cc3180c5615858 2024-11-23T15:25:30,774 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/A/5afe36eacc24481ab7cc3180c5615858, entries=200, sequenceid=112, filesize=14.0 K 2024-11-23T15:25:30,775 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/B/a460fe83f4984283861e4dd2fb635672 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/B/a460fe83f4984283861e4dd2fb635672 2024-11-23T15:25:30,779 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/B/a460fe83f4984283861e4dd2fb635672, entries=150, sequenceid=112, filesize=11.7 K 2024-11-23T15:25:30,780 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/C/c5d0a09527d0467188e2459a2245dd24 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/C/c5d0a09527d0467188e2459a2245dd24 2024-11-23T15:25:30,785 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/C/c5d0a09527d0467188e2459a2245dd24, entries=150, sequenceid=112, filesize=11.7 K 2024-11-23T15:25:30,786 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for 9d175c2ab5829c897c27b4bef55dd393 in 1264ms, sequenceid=112, compaction requested=true 2024-11-23T15:25:30,786 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 9d175c2ab5829c897c27b4bef55dd393: 2024-11-23T15:25:30,786 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 
9d175c2ab5829c897c27b4bef55dd393:A, priority=-2147483648, current under compaction store size is 1 2024-11-23T15:25:30,786 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T15:25:30,786 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9d175c2ab5829c897c27b4bef55dd393:B, priority=-2147483648, current under compaction store size is 2 2024-11-23T15:25:30,786 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T15:25:30,786 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9d175c2ab5829c897c27b4bef55dd393:C, priority=-2147483648, current under compaction store size is 3 2024-11-23T15:25:30,786 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T15:25:30,786 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 6 store files, 0 compacting, 6 eligible, 16 blocking 2024-11-23T15:25:30,786 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 6 store files, 0 compacting, 6 eligible, 16 blocking 2024-11-23T15:25:30,788 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 6 files of size 72006 starting at candidate #0 after considering 10 permutations with 10 in ratio 2024-11-23T15:25:30,788 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 6 files of size 74346 starting at candidate #0 after considering 10 permutations with 10 in ratio 2024-11-23T15:25:30,788 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1540): 9d175c2ab5829c897c27b4bef55dd393/B is initiating minor compaction (all files) 2024-11-23T15:25:30,788 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HStore(1540): 9d175c2ab5829c897c27b4bef55dd393/A is initiating minor compaction (all files) 2024-11-23T15:25:30,788 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9d175c2ab5829c897c27b4bef55dd393/B in TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393. 2024-11-23T15:25:30,788 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9d175c2ab5829c897c27b4bef55dd393/A in TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393. 
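
After this flush each store holds six HFiles, so ExploringCompactionPolicy selects all six eligible files and queues minor compactions of stores A and B, as the entries above and below show. Those compactions are system-requested by the flusher; for comparison, the sketch below shows how a client could ask for a compaction of one family explicitly through the Admin API. The class name and the choice of family A are illustrative, and the region server still decides which files actually get merged.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.util.Bytes;

public class RequestCompactionExample {     // illustrative class name
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("TestAcidGuarantees");
      // Queue a compaction of column family A; selection of the files to
      // merge is still done on the region server, as in the log above.
      admin.compact(table, Bytes.toBytes("A"));
    }
  }
}
```
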
2024-11-23T15:25:30,789 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/A/a7b16659eeeb48699f98c718208e35ba, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/A/7af5775a22d641a49242e2358234504a, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/A/31efe6285ea74ecc9b9078f69e65c98b, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/A/f5cf535ee07643f193f22c3558762e99, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/A/817b32710d2f4fd1b3e150b2acbd385b, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/A/5afe36eacc24481ab7cc3180c5615858] into tmpdir=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp, totalSize=72.6 K 2024-11-23T15:25:30,789 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/B/6f1b520ae3b24fefb63310ad64a7f53b, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/B/6865cac2ffb64975a15a7da0d8e17daa, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/B/94ab22e6072547788dec48f4df2816ec, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/B/1280dfa1fbf94cefb0b5156751c39f72, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/B/3c25d53b551b4d3f8f15281f77ac5470, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/B/a460fe83f4984283861e4dd2fb635672] into tmpdir=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp, totalSize=70.3 K 2024-11-23T15:25:30,789 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.Compactor(224): Compacting a7b16659eeeb48699f98c718208e35ba, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=14, earliestPutTs=1732375523675 2024-11-23T15:25:30,789 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting 6f1b520ae3b24fefb63310ad64a7f53b, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=14, earliestPutTs=1732375523675 2024-11-23T15:25:30,789 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7af5775a22d641a49242e2358234504a, keycount=150, bloomtype=ROW, size=11.7 K, 
encoding=NONE, compression=NONE, seqNum=37, earliestPutTs=1732375523693 2024-11-23T15:25:30,789 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting 6865cac2ffb64975a15a7da0d8e17daa, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=37, earliestPutTs=1732375523693 2024-11-23T15:25:30,790 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 31efe6285ea74ecc9b9078f69e65c98b, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1732375524014 2024-11-23T15:25:30,790 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting 94ab22e6072547788dec48f4df2816ec, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1732375524014 2024-11-23T15:25:30,790 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.Compactor(224): Compacting f5cf535ee07643f193f22c3558762e99, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=73, earliestPutTs=1732375526141 2024-11-23T15:25:30,790 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting 1280dfa1fbf94cefb0b5156751c39f72, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=73, earliestPutTs=1732375526141 2024-11-23T15:25:30,791 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 817b32710d2f4fd1b3e150b2acbd385b, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=88, earliestPutTs=1732375527274 2024-11-23T15:25:30,791 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting 3c25d53b551b4d3f8f15281f77ac5470, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=88, earliestPutTs=1732375527274 2024-11-23T15:25:30,791 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5afe36eacc24481ab7cc3180c5615858, keycount=200, bloomtype=ROW, size=14.0 K, encoding=NONE, compression=NONE, seqNum=112, earliestPutTs=1732375528403 2024-11-23T15:25:30,791 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting a460fe83f4984283861e4dd2fb635672, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=112, earliestPutTs=1732375528403 2024-11-23T15:25:30,812 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9d175c2ab5829c897c27b4bef55dd393#B#compaction#192 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-23T15:25:30,812 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9d175c2ab5829c897c27b4bef55dd393#A#compaction#193 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T15:25:30,812 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/B/28b3b1c023234af380514983526dacb5 is 50, key is test_row_0/B:col10/1732375529522/Put/seqid=0 2024-11-23T15:25:30,813 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/A/f1ce2c8c04e24c65bbeeb736cafd8122 is 50, key is test_row_0/A:col10/1732375529522/Put/seqid=0 2024-11-23T15:25:30,823 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742058_1234 (size=12207) 2024-11-23T15:25:30,823 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742059_1235 (size=12207) 2024-11-23T15:25:30,862 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:25:30,863 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-11-23T15:25:30,863 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393. 
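
In the entries that follow, the retried FlushRegionProcedure (pid=76) finally runs: the region is no longer mid-flush, so all three column families are written out at sequenceid=124 and the parent FlushTableProcedure (pid=75) completes. That flush was requested from the client side ("Client=jenkins//172.17.0.2 flush TestAcidGuarantees"); a minimal sketch of the corresponding call is below, with the class name illustrative and the table name taken from the log.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {            // illustrative class name
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Ask the master to flush every region of the table; this is the kind
      // of request behind the FlushTableProcedure (pid=75/77) entries here.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}
```
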
2024-11-23T15:25:30,863 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2837): Flushing 9d175c2ab5829c897c27b4bef55dd393 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB
2024-11-23T15:25:30,863 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9d175c2ab5829c897c27b4bef55dd393, store=A
2024-11-23T15:25:30,863 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-23T15:25:30,864 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9d175c2ab5829c897c27b4bef55dd393, store=B
2024-11-23T15:25:30,864 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-23T15:25:30,864 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9d175c2ab5829c897c27b4bef55dd393, store=C
2024-11-23T15:25:30,864 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-23T15:25:30,868 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/A/c65caff867e94621940be26ed817fcff is 50, key is test_row_0/A:col10/1732375529529/Put/seqid=0
2024-11-23T15:25:30,871 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742060_1236 (size=12001)
2024-11-23T15:25:30,873 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=124 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/A/c65caff867e94621940be26ed817fcff
2024-11-23T15:25:30,879 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/B/a334d2f1142847e390f2665b774f2d2d is 50, key is test_row_0/B:col10/1732375529529/Put/seqid=0
2024-11-23T15:25:30,883 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742061_1237 (size=12001)
2024-11-23T15:25:30,884 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=124 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/B/a334d2f1142847e390f2665b774f2d2d
2024-11-23T15:25:30,890 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/C/9d9a0ec19bf147c485d18e3ee53fe2aa is 50, key is test_row_0/C:col10/1732375529529/Put/seqid=0
2024-11-23T15:25:30,893 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742062_1238 (size=12001)
2024-11-23T15:25:30,896 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=124 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/C/9d9a0ec19bf147c485d18e3ee53fe2aa
2024-11-23T15:25:30,900 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/A/c65caff867e94621940be26ed817fcff as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/A/c65caff867e94621940be26ed817fcff
2024-11-23T15:25:30,904 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/A/c65caff867e94621940be26ed817fcff, entries=150, sequenceid=124, filesize=11.7 K
2024-11-23T15:25:30,905 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/B/a334d2f1142847e390f2665b774f2d2d as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/B/a334d2f1142847e390f2665b774f2d2d
2024-11-23T15:25:30,909 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/B/a334d2f1142847e390f2665b774f2d2d, entries=150, sequenceid=124, filesize=11.7 K
2024-11-23T15:25:30,909 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/C/9d9a0ec19bf147c485d18e3ee53fe2aa as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/C/9d9a0ec19bf147c485d18e3ee53fe2aa
2024-11-23T15:25:30,913 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/C/9d9a0ec19bf147c485d18e3ee53fe2aa, entries=150, sequenceid=124, filesize=11.7 K
2024-11-23T15:25:30,914 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=0 B/0 for 9d175c2ab5829c897c27b4bef55dd393 in 51ms, sequenceid=124, compaction requested=true
2024-11-23T15:25:30,914 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2538): Flush status journal for 9d175c2ab5829c897c27b4bef55dd393:
2024-11-23T15:25:30,915 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.
2024-11-23T15:25:30,915 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=76
2024-11-23T15:25:30,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4106): Remote procedure done, pid=76
2024-11-23T15:25:30,917 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=76, resume processing ppid=75
2024-11-23T15:25:30,918 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=76, ppid=75, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 817 msec
2024-11-23T15:25:30,919 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=75, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=75, table=TestAcidGuarantees in 821 msec
2024-11-23T15:25:31,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75
2024-11-23T15:25:31,203 INFO [Thread-1019 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 75 completed
2024-11-23T15:25:31,205 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees
2024-11-23T15:25:31,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] procedure2.ProcedureExecutor(1098): Stored pid=77, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=77, table=TestAcidGuarantees
2024-11-23T15:25:31,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77
2024-11-23T15:25:31,207 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=77, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=77, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE
2024-11-23T15:25:31,207 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=77, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=77, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-11-23T15:25:31,207 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=78, ppid=77, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-11-23T15:25:31,232 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/B/28b3b1c023234af380514983526dacb5 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/B/28b3b1c023234af380514983526dacb5
2024-11-23T15:25:31,232 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/A/f1ce2c8c04e24c65bbeeb736cafd8122 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/A/f1ce2c8c04e24c65bbeeb736cafd8122
2024-11-23T15:25:31,238 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 6 (all) file(s) in 9d175c2ab5829c897c27b4bef55dd393/B of 9d175c2ab5829c897c27b4bef55dd393 into 28b3b1c023234af380514983526dacb5(size=11.9 K), total size for store is 23.6 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-11-23T15:25:31,238 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9d175c2ab5829c897c27b4bef55dd393:
2024-11-23T15:25:31,238 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393., storeName=9d175c2ab5829c897c27b4bef55dd393/B, priority=10, startTime=1732375530786; duration=0sec
2024-11-23T15:25:31,238 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0
2024-11-23T15:25:31,238 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9d175c2ab5829c897c27b4bef55dd393:B
2024-11-23T15:25:31,238 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 7 store files, 0 compacting, 7 eligible, 16 blocking
2024-11-23T15:25:31,238 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 6 (all) file(s) in 9d175c2ab5829c897c27b4bef55dd393/A of 9d175c2ab5829c897c27b4bef55dd393 into f1ce2c8c04e24c65bbeeb736cafd8122(size=11.9 K), total size for store is 23.6 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-11-23T15:25:31,238 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9d175c2ab5829c897c27b4bef55dd393:
2024-11-23T15:25:31,238 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393., storeName=9d175c2ab5829c897c27b4bef55dd393/A, priority=10, startTime=1732375530786; duration=0sec
2024-11-23T15:25:31,238 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-23T15:25:31,238 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9d175c2ab5829c897c27b4bef55dd393:A
2024-11-23T15:25:31,241 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 7 files of size 84007 starting at candidate #0 after considering 15 permutations with 15 in ratio
2024-11-23T15:25:31,241 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1540): 9d175c2ab5829c897c27b4bef55dd393/C is initiating minor compaction (all files)
2024-11-23T15:25:31,241 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9d175c2ab5829c897c27b4bef55dd393/C in TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.
2024-11-23T15:25:31,241 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/C/155d8df9687249f580bae119913a125c, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/C/33c7f76f0e7546f9a3ac1433e8904f27, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/C/61b879b52b574ca79e22eae226faee54, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/C/636abcafa81d4c3389223cc8ec33b52a, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/C/cb87a155e37f47aa8508ee4c7003df8b, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/C/c5d0a09527d0467188e2459a2245dd24, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/C/9d9a0ec19bf147c485d18e3ee53fe2aa] into tmpdir=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp, totalSize=82.0 K
2024-11-23T15:25:31,241 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting 155d8df9687249f580bae119913a125c, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=14, earliestPutTs=1732375523675
2024-11-23T15:25:31,242 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting 33c7f76f0e7546f9a3ac1433e8904f27, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=37, earliestPutTs=1732375523693
2024-11-23T15:25:31,242 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting 61b879b52b574ca79e22eae226faee54, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1732375524014
2024-11-23T15:25:31,243 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting 636abcafa81d4c3389223cc8ec33b52a, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=73, earliestPutTs=1732375526141
2024-11-23T15:25:31,243 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting cb87a155e37f47aa8508ee4c7003df8b, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=88, earliestPutTs=1732375527274
2024-11-23T15:25:31,243 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting c5d0a09527d0467188e2459a2245dd24, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=112, earliestPutTs=1732375528403
2024-11-23T15:25:31,244 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting 9d9a0ec19bf147c485d18e3ee53fe2aa, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=124, earliestPutTs=1732375529527
2024-11-23T15:25:31,262 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9d175c2ab5829c897c27b4bef55dd393#C#compaction#197 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-11-23T15:25:31,263 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/C/97c39e6784a64f43a34279a8a1795576 is 50, key is test_row_0/C:col10/1732375529529/Put/seqid=0
2024-11-23T15:25:31,305 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742063_1239 (size=12241)
2024-11-23T15:25:31,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77
2024-11-23T15:25:31,313 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/C/97c39e6784a64f43a34279a8a1795576 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/C/97c39e6784a64f43a34279a8a1795576
2024-11-23T15:25:31,318 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 7 (all) file(s) in 9d175c2ab5829c897c27b4bef55dd393/C of 9d175c2ab5829c897c27b4bef55dd393 into 97c39e6784a64f43a34279a8a1795576(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-11-23T15:25:31,318 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9d175c2ab5829c897c27b4bef55dd393:
2024-11-23T15:25:31,318 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393., storeName=9d175c2ab5829c897c27b4bef55dd393/C, priority=9, startTime=1732375530786; duration=0sec
2024-11-23T15:25:31,318 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-23T15:25:31,318 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9d175c2ab5829c897c27b4bef55dd393:C
2024-11-23T15:25:31,359 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985
2024-11-23T15:25:31,359 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78
2024-11-23T15:25:31,360 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.
2024-11-23T15:25:31,360 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2538): Flush status journal for 9d175c2ab5829c897c27b4bef55dd393:
2024-11-23T15:25:31,360 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.
2024-11-23T15:25:31,360 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=78
2024-11-23T15:25:31,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4106): Remote procedure done, pid=78
2024-11-23T15:25:31,363 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=78, resume processing ppid=77
2024-11-23T15:25:31,364 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=78, ppid=77, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 154 msec
2024-11-23T15:25:31,366 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=77, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=77, table=TestAcidGuarantees in 159 msec
2024-11-23T15:25:31,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77
2024-11-23T15:25:31,509 INFO [Thread-1019 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 77 completed
2024-11-23T15:25:31,510 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees
2024-11-23T15:25:31,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] procedure2.ProcedureExecutor(1098): Stored pid=79, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=79, table=TestAcidGuarantees
2024-11-23T15:25:31,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79
2024-11-23T15:25:31,512 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=79, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=79, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE
2024-11-23T15:25:31,513 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=79, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=79, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-11-23T15:25:31,513 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=80, ppid=79, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-11-23T15:25:31,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79
2024-11-23T15:25:31,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(8581): Flush requested on 9d175c2ab5829c897c27b4bef55dd393
2024-11-23T15:25:31,659 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 9d175c2ab5829c897c27b4bef55dd393 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB
2024-11-23T15:25:31,659 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9d175c2ab5829c897c27b4bef55dd393, store=A
2024-11-23T15:25:31,660 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-23T15:25:31,660 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9d175c2ab5829c897c27b4bef55dd393, store=B
2024-11-23T15:25:31,660 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-23T15:25:31,660 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9d175c2ab5829c897c27b4bef55dd393, store=C
2024-11-23T15:25:31,660 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-23T15:25:31,664 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985
2024-11-23T15:25:31,664 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80
2024-11-23T15:25:31,664 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/A/dff9c4325ede4685a5436aeac5acf34e is 50, key is test_row_0/A:col10/1732375531658/Put/seqid=0
2024-11-23T15:25:31,664 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.
2024-11-23T15:25:31,664 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393. as already flushing
2024-11-23T15:25:31,665 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.
2024-11-23T15:25:31,665 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] handler.RSProcedureHandler(58): pid=80
java.io.IOException: Unable to complete flush {ENCODED => 9d175c2ab5829c897c27b4bef55dd393, NAME => 'TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-23T15:25:31,665 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=80
java.io.IOException: Unable to complete flush {ENCODED => 9d175c2ab5829c897c27b4bef55dd393, NAME => 'TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-23T15:25:31,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=80
org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9d175c2ab5829c897c27b4bef55dd393, NAME => 'TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?]
    at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?]
    at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
    at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9d175c2ab5829c897c27b4bef55dd393, NAME => 'TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-23T15:25:31,669 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742064_1240 (size=12151)
2024-11-23T15:25:31,670 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=139 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/A/dff9c4325ede4685a5436aeac5acf34e
2024-11-23T15:25:31,678 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/B/139b1e9aa79e4bc986435b489d23bf98 is 50, key is test_row_0/B:col10/1732375531658/Put/seqid=0
2024-11-23T15:25:31,681 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-23T15:25:31,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43626 deadline: 1732375591678, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985
2024-11-23T15:25:31,682 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-23T15:25:31,682 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742065_1241 (size=12151)
2024-11-23T15:25:31,682 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-23T15:25:31,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43596 deadline: 1732375591679, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985
2024-11-23T15:25:31,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43642 deadline: 1732375591679, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985
2024-11-23T15:25:31,683 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-23T15:25:31,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43598 deadline: 1732375591680, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985
2024-11-23T15:25:31,683 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-23T15:25:31,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43624 deadline: 1732375591680, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985
2024-11-23T15:25:31,785 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-23T15:25:31,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43626 deadline: 1732375591783, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985
2024-11-23T15:25:31,785 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-23T15:25:31,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43642 deadline: 1732375591783, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985
2024-11-23T15:25:31,785 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-23T15:25:31,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43596 deadline: 1732375591783, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985
2024-11-23T15:25:31,786 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-23T15:25:31,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43598 deadline: 1732375591784, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985
2024-11-23T15:25:31,786 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-23T15:25:31,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43624 deadline: 1732375591784, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985
2024-11-23T15:25:31,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79
2024-11-23T15:25:31,817 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985
2024-11-23T15:25:31,817 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80
2024-11-23T15:25:31,818 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.
2024-11-23T15:25:31,818 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393. as already flushing
2024-11-23T15:25:31,818 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.
2024-11-23T15:25:31,818 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] handler.RSProcedureHandler(58): pid=80
java.io.IOException: Unable to complete flush {ENCODED => 9d175c2ab5829c897c27b4bef55dd393, NAME => 'TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-23T15:25:31,818 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=80
java.io.IOException: Unable to complete flush {ENCODED => 9d175c2ab5829c897c27b4bef55dd393, NAME => 'TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-23T15:25:31,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=80
org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9d175c2ab5829c897c27b4bef55dd393, NAME => 'TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?]
    at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?]
    at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
    at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9d175c2ab5829c897c27b4bef55dd393, NAME => 'TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-23T15:25:31,970 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985
2024-11-23T15:25:31,970 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80
2024-11-23T15:25:31,970 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.
2024-11-23T15:25:31,970 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393. as already flushing
2024-11-23T15:25:31,970 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.
2024-11-23T15:25:31,971 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] handler.RSProcedureHandler(58): pid=80
java.io.IOException: Unable to complete flush {ENCODED => 9d175c2ab5829c897c27b4bef55dd393, NAME => 'TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-23T15:25:31,971 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=80
java.io.IOException: Unable to complete flush {ENCODED => 9d175c2ab5829c897c27b4bef55dd393, NAME => 'TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-23T15:25:31,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=80
org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9d175c2ab5829c897c27b4bef55dd393, NAME => 'TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?]
    at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?]
    at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
    at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9d175c2ab5829c897c27b4bef55dd393, NAME => 'TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-23T15:25:31,988 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-23T15:25:31,988 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:31,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43598 deadline: 1732375591987, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:31,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43596 deadline: 1732375591986, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:31,989 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:31,989 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:31,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43642 deadline: 1732375591987, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:31,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43626 deadline: 1732375591987, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:31,989 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:31,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43624 deadline: 1732375591987, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:32,083 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=139 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/B/139b1e9aa79e4bc986435b489d23bf98 2024-11-23T15:25:32,092 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/C/70e4bb4f9c864257a6b86d3f21cb1ae2 is 50, key is test_row_0/C:col10/1732375531658/Put/seqid=0 2024-11-23T15:25:32,097 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742066_1242 (size=12151) 2024-11-23T15:25:32,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-11-23T15:25:32,122 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:25:32,123 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-11-23T15:25:32,123 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393. 2024-11-23T15:25:32,123 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393. as already flushing 2024-11-23T15:25:32,123 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393. 
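[editor's note] The WARN/DEBUG pairs above repeat because HRegion.checkResources rejects each Mutate call once the region's memstore passes its blocking threshold, while the pid=80 flush procedure keeps returning "as already flushing". In HBase that threshold is hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier; the logged "Over memstore limit=512.0 K" is consistent with a deliberately small test flush size rather than the production default of 128 MB. The exact configuration of this test run is not shown in the log, so the 128 KB value in the sketch below is an assumption chosen only to reproduce the 512 K figure.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class SmallMemstoreLimit {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Assumed test-only values: a 128 KB flush size with the default
        // block multiplier of 4 yields the 512 KB blocking limit seen above.
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
        long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0L)
            * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
        // Puts against the region fail with RegionTooBusyException once the
        // memstore grows past blockingLimit bytes (524288 here, i.e. 512.0 K).
        System.out.println("blocking memstore limit = " + blockingLimit + " bytes");
      }
    }
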
2024-11-23T15:25:32,123 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] handler.RSProcedureHandler(58): pid=80 java.io.IOException: Unable to complete flush {ENCODED => 9d175c2ab5829c897c27b4bef55dd393, NAME => 'TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:25:32,123 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=80 java.io.IOException: Unable to complete flush {ENCODED => 9d175c2ab5829c897c27b4bef55dd393, NAME => 'TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:25:32,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=80 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9d175c2ab5829c897c27b4bef55dd393, NAME => 'TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9d175c2ab5829c897c27b4bef55dd393, NAME => 'TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:25:32,275 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:25:32,276 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-11-23T15:25:32,276 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393. 2024-11-23T15:25:32,276 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393. as already flushing 2024-11-23T15:25:32,276 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393. 2024-11-23T15:25:32,276 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] handler.RSProcedureHandler(58): pid=80 java.io.IOException: Unable to complete flush {ENCODED => 9d175c2ab5829c897c27b4bef55dd393, NAME => 'TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:25:32,276 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=80 java.io.IOException: Unable to complete flush {ENCODED => 9d175c2ab5829c897c27b4bef55dd393, NAME => 'TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:25:32,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=80 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9d175c2ab5829c897c27b4bef55dd393, NAME => 'TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9d175c2ab5829c897c27b4bef55dd393, NAME => 'TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:25:32,291 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:32,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43596 deadline: 1732375592290, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:32,291 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:32,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43626 deadline: 1732375592290, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:32,291 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:32,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43598 deadline: 1732375592290, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:32,293 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:32,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43642 deadline: 1732375592292, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:32,293 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:32,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43624 deadline: 1732375592292, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:32,429 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:25:32,429 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-11-23T15:25:32,429 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393. 2024-11-23T15:25:32,429 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393. as already flushing 2024-11-23T15:25:32,429 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393. 2024-11-23T15:25:32,429 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] handler.RSProcedureHandler(58): pid=80 java.io.IOException: Unable to complete flush {ENCODED => 9d175c2ab5829c897c27b4bef55dd393, NAME => 'TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:25:32,430 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=80 java.io.IOException: Unable to complete flush {ENCODED => 9d175c2ab5829c897c27b4bef55dd393, NAME => 'TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:25:32,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=80 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9d175c2ab5829c897c27b4bef55dd393, NAME => 'TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9d175c2ab5829c897c27b4bef55dd393, NAME => 'TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:25:32,498 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=139 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/C/70e4bb4f9c864257a6b86d3f21cb1ae2 2024-11-23T15:25:32,503 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/A/dff9c4325ede4685a5436aeac5acf34e as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/A/dff9c4325ede4685a5436aeac5acf34e 2024-11-23T15:25:32,508 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/A/dff9c4325ede4685a5436aeac5acf34e, entries=150, sequenceid=139, filesize=11.9 K 2024-11-23T15:25:32,509 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/B/139b1e9aa79e4bc986435b489d23bf98 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/B/139b1e9aa79e4bc986435b489d23bf98 2024-11-23T15:25:32,513 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/B/139b1e9aa79e4bc986435b489d23bf98, entries=150, 
sequenceid=139, filesize=11.9 K 2024-11-23T15:25:32,514 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/C/70e4bb4f9c864257a6b86d3f21cb1ae2 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/C/70e4bb4f9c864257a6b86d3f21cb1ae2 2024-11-23T15:25:32,518 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/C/70e4bb4f9c864257a6b86d3f21cb1ae2, entries=150, sequenceid=139, filesize=11.9 K 2024-11-23T15:25:32,519 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 9d175c2ab5829c897c27b4bef55dd393 in 860ms, sequenceid=139, compaction requested=true 2024-11-23T15:25:32,519 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 9d175c2ab5829c897c27b4bef55dd393: 2024-11-23T15:25:32,519 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9d175c2ab5829c897c27b4bef55dd393:A, priority=-2147483648, current under compaction store size is 1 2024-11-23T15:25:32,519 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T15:25:32,519 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T15:25:32,519 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T15:25:32,520 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9d175c2ab5829c897c27b4bef55dd393:B, priority=-2147483648, current under compaction store size is 2 2024-11-23T15:25:32,520 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T15:25:32,520 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9d175c2ab5829c897c27b4bef55dd393:C, priority=-2147483648, current under compaction store size is 3 2024-11-23T15:25:32,520 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T15:25:32,521 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36359 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T15:25:32,521 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HStore(1540): 9d175c2ab5829c897c27b4bef55dd393/A is initiating minor compaction (all files) 2024-11-23T15:25:32,521 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 
36359 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T15:25:32,521 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9d175c2ab5829c897c27b4bef55dd393/A in TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393. 2024-11-23T15:25:32,521 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1540): 9d175c2ab5829c897c27b4bef55dd393/B is initiating minor compaction (all files) 2024-11-23T15:25:32,521 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/A/f1ce2c8c04e24c65bbeeb736cafd8122, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/A/c65caff867e94621940be26ed817fcff, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/A/dff9c4325ede4685a5436aeac5acf34e] into tmpdir=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp, totalSize=35.5 K 2024-11-23T15:25:32,521 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9d175c2ab5829c897c27b4bef55dd393/B in TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393. 2024-11-23T15:25:32,521 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/B/28b3b1c023234af380514983526dacb5, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/B/a334d2f1142847e390f2665b774f2d2d, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/B/139b1e9aa79e4bc986435b489d23bf98] into tmpdir=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp, totalSize=35.5 K 2024-11-23T15:25:32,521 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.Compactor(224): Compacting f1ce2c8c04e24c65bbeeb736cafd8122, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=112, earliestPutTs=1732375528403 2024-11-23T15:25:32,521 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting 28b3b1c023234af380514983526dacb5, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=112, earliestPutTs=1732375528403 2024-11-23T15:25:32,522 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.Compactor(224): Compacting c65caff867e94621940be26ed817fcff, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=124, earliestPutTs=1732375529527 2024-11-23T15:25:32,522 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.Compactor(224): Compacting dff9c4325ede4685a5436aeac5acf34e, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=139, earliestPutTs=1732375531654 2024-11-23T15:25:32,522 DEBUG 
[RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting a334d2f1142847e390f2665b774f2d2d, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=124, earliestPutTs=1732375529527 2024-11-23T15:25:32,522 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting 139b1e9aa79e4bc986435b489d23bf98, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=139, earliestPutTs=1732375531654 2024-11-23T15:25:32,541 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9d175c2ab5829c897c27b4bef55dd393#A#compaction#201 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T15:25:32,542 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/A/32bbf6f6eb3843188f49c3a7433f47a8 is 50, key is test_row_0/A:col10/1732375531658/Put/seqid=0 2024-11-23T15:25:32,544 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9d175c2ab5829c897c27b4bef55dd393#B#compaction#202 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T15:25:32,544 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/B/24708821b2034c5383a793e312eea481 is 50, key is test_row_0/B:col10/1732375531658/Put/seqid=0 2024-11-23T15:25:32,566 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742067_1243 (size=12459) 2024-11-23T15:25:32,574 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742068_1244 (size=12459) 2024-11-23T15:25:32,581 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:25:32,582 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-11-23T15:25:32,582 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393. 
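[editor's note] At this point the flusher has written the A, B, and C store files for sequenceid=139, both compaction threads have each selected three store files for a minor compaction, and the master is re-dispatching the pid=80 flush procedure to the region server. For orientation only, the same flush and compaction activity can also be requested administratively through the public HBase Admin API; the snippet below is an illustrative sketch using default connection settings and is not taken from this test run.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushAndCompact {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableName table = TableName.valueOf("TestAcidGuarantees");
          admin.flush(table);   // ask every region of the table to flush its memstores
          admin.compact(table); // queue a minor compaction of the table's store files
        }
      }
    }
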
2024-11-23T15:25:32,582 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2837): Flushing 9d175c2ab5829c897c27b4bef55dd393 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-23T15:25:32,583 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9d175c2ab5829c897c27b4bef55dd393, store=A 2024-11-23T15:25:32,583 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:25:32,583 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9d175c2ab5829c897c27b4bef55dd393, store=B 2024-11-23T15:25:32,583 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:25:32,583 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9d175c2ab5829c897c27b4bef55dd393, store=C 2024-11-23T15:25:32,583 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:25:32,588 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/A/2fed2d6d0b2e449f8725510f6bb5ba57 is 50, key is test_row_0/A:col10/1732375531679/Put/seqid=0 2024-11-23T15:25:32,594 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742069_1245 (size=12151) 2024-11-23T15:25:32,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-11-23T15:25:32,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(8581): Flush requested on 9d175c2ab5829c897c27b4bef55dd393 2024-11-23T15:25:32,796 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393. as already flushing 2024-11-23T15:25:32,803 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:32,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43624 deadline: 1732375592799, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:32,803 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:32,803 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:32,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43642 deadline: 1732375592800, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:32,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43626 deadline: 1732375592800, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:32,803 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:32,803 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:32,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43596 deadline: 1732375592801, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:32,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43598 deadline: 1732375592801, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:32,905 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:32,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43624 deadline: 1732375592904, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:32,905 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:32,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43626 deadline: 1732375592904, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:32,906 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:32,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43596 deadline: 1732375592904, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:32,906 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:32,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43598 deadline: 1732375592904, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:32,906 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:32,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43642 deadline: 1732375592904, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:32,973 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/B/24708821b2034c5383a793e312eea481 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/B/24708821b2034c5383a793e312eea481 2024-11-23T15:25:32,978 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 9d175c2ab5829c897c27b4bef55dd393/B of 9d175c2ab5829c897c27b4bef55dd393 into 24708821b2034c5383a793e312eea481(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
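A minimal sketch of the two standard HBase settings behind the repeated "Over memstore limit=512.0 K" RegionTooBusyException warnings above: a region blocks updates once its memstore exceeds the flush size multiplied by the block multiplier. The values below are hypothetical, chosen only so their product matches the 512 K limit seen in the log; they are not this test's actual configuration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreBlockingLimitSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024); // 128 KB flush size (hypothetical)
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);      // 128 KB * 4 = 512 KB blocking limit
        long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
                * conf.getInt("hbase.hregion.memstore.block.multiplier", 0);
        // Writes above this per-region memstore size are rejected with RegionTooBusyException
        // until a flush brings the memstore back under the limit.
        System.out.println("blocking memstore limit = " + blockingLimit + " bytes"); // 524288
    }
}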
2024-11-23T15:25:32,978 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9d175c2ab5829c897c27b4bef55dd393: 2024-11-23T15:25:32,978 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393., storeName=9d175c2ab5829c897c27b4bef55dd393/B, priority=13, startTime=1732375532519; duration=0sec 2024-11-23T15:25:32,978 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T15:25:32,978 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9d175c2ab5829c897c27b4bef55dd393:B 2024-11-23T15:25:32,978 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 2 store files, 0 compacting, 2 eligible, 16 blocking 2024-11-23T15:25:32,979 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/A/32bbf6f6eb3843188f49c3a7433f47a8 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/A/32bbf6f6eb3843188f49c3a7433f47a8 2024-11-23T15:25:32,979 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-11-23T15:25:32,979 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 2024-11-23T15:25:32,979 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393. because compaction request was cancelled 2024-11-23T15:25:32,979 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9d175c2ab5829c897c27b4bef55dd393:C 2024-11-23T15:25:32,988 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 9d175c2ab5829c897c27b4bef55dd393/A of 9d175c2ab5829c897c27b4bef55dd393 into 32bbf6f6eb3843188f49c3a7433f47a8(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
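A brief, assumption-labeled illustration of the setting behind the "Need 3 to initiate" decision logged by SortedCompactionPolicy above: the minimum number of eligible store files before a compaction is scheduled, which defaults to 3. This is shown only as a reading aid, not as the test's own configuration code.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionMinFilesSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Default is 3, matching the policy's decision above to skip compaction
        // when fewer than 3 eligible store files are available.
        System.out.println("hbase.hstore.compaction.min = "
                + conf.getInt("hbase.hstore.compaction.min", 3));
    }
}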
2024-11-23T15:25:32,988 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9d175c2ab5829c897c27b4bef55dd393: 2024-11-23T15:25:32,988 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393., storeName=9d175c2ab5829c897c27b4bef55dd393/A, priority=13, startTime=1732375532519; duration=0sec 2024-11-23T15:25:32,988 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T15:25:32,988 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9d175c2ab5829c897c27b4bef55dd393:A 2024-11-23T15:25:32,995 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=164 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/A/2fed2d6d0b2e449f8725510f6bb5ba57 2024-11-23T15:25:33,003 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/B/b4cbeda552974e8dabdeb0448f380148 is 50, key is test_row_0/B:col10/1732375531679/Put/seqid=0 2024-11-23T15:25:33,007 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742070_1246 (size=12151) 2024-11-23T15:25:33,008 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=164 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/B/b4cbeda552974e8dabdeb0448f380148 2024-11-23T15:25:33,015 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/C/29973944ec6b496589f5f334bd48a8d5 is 50, key is test_row_0/C:col10/1732375531679/Put/seqid=0 2024-11-23T15:25:33,019 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742071_1247 (size=12151) 2024-11-23T15:25:33,107 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:33,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43624 deadline: 1732375593106, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:33,108 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:33,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43596 deadline: 1732375593107, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:33,108 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:33,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43598 deadline: 1732375593107, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:33,109 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:33,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43626 deadline: 1732375593107, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:33,110 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:33,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43642 deadline: 1732375593108, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:33,411 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:33,411 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:33,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43624 deadline: 1732375593409, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:33,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43596 deadline: 1732375593410, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:33,412 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:33,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43598 deadline: 1732375593410, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:33,412 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:33,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43626 deadline: 1732375593411, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:33,412 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:33,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43642 deadline: 1732375593411, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:33,420 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=164 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/C/29973944ec6b496589f5f334bd48a8d5 2024-11-23T15:25:33,425 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/A/2fed2d6d0b2e449f8725510f6bb5ba57 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/A/2fed2d6d0b2e449f8725510f6bb5ba57 2024-11-23T15:25:33,429 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/A/2fed2d6d0b2e449f8725510f6bb5ba57, entries=150, sequenceid=164, filesize=11.9 K 2024-11-23T15:25:33,431 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/B/b4cbeda552974e8dabdeb0448f380148 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/B/b4cbeda552974e8dabdeb0448f380148 2024-11-23T15:25:33,435 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/B/b4cbeda552974e8dabdeb0448f380148, entries=150, sequenceid=164, filesize=11.9 K 2024-11-23T15:25:33,435 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/C/29973944ec6b496589f5f334bd48a8d5 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/C/29973944ec6b496589f5f334bd48a8d5 2024-11-23T15:25:33,439 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/C/29973944ec6b496589f5f334bd48a8d5, entries=150, sequenceid=164, filesize=11.9 K 2024-11-23T15:25:33,444 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 9d175c2ab5829c897c27b4bef55dd393 in 862ms, sequenceid=164, compaction requested=true 2024-11-23T15:25:33,444 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2538): Flush status journal for 9d175c2ab5829c897c27b4bef55dd393: 2024-11-23T15:25:33,444 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393. 2024-11-23T15:25:33,444 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=80 2024-11-23T15:25:33,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4106): Remote procedure done, pid=80 2024-11-23T15:25:33,447 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=80, resume processing ppid=79 2024-11-23T15:25:33,447 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=80, ppid=79, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.9330 sec 2024-11-23T15:25:33,449 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=79, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=79, table=TestAcidGuarantees in 1.9380 sec 2024-11-23T15:25:33,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-11-23T15:25:33,616 INFO [Thread-1019 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 79 completed 2024-11-23T15:25:33,618 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-23T15:25:33,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] procedure2.ProcedureExecutor(1098): Stored pid=81, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=81, table=TestAcidGuarantees 2024-11-23T15:25:33,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-11-23T15:25:33,620 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=81, state=RUNNABLE:FLUSH_TABLE_PREPARE, 
locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=81, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-23T15:25:33,621 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=81, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=81, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-23T15:25:33,621 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=82, ppid=81, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-23T15:25:33,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-11-23T15:25:33,772 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:25:33,773 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=82 2024-11-23T15:25:33,773 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393. 2024-11-23T15:25:33,773 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2837): Flushing 9d175c2ab5829c897c27b4bef55dd393 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-23T15:25:33,773 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9d175c2ab5829c897c27b4bef55dd393, store=A 2024-11-23T15:25:33,774 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:25:33,774 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9d175c2ab5829c897c27b4bef55dd393, store=B 2024-11-23T15:25:33,774 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:25:33,774 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9d175c2ab5829c897c27b4bef55dd393, store=C 2024-11-23T15:25:33,774 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:25:33,778 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/A/bc19e12c8f644bcea03932e4830a60b9 is 50, key is test_row_0/A:col10/1732375532800/Put/seqid=0 2024-11-23T15:25:33,783 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742072_1248 (size=12151) 
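A minimal client-side sketch, assuming a reachable cluster configured through hbase-site.xml, of the kind of flush request the jenkins client issues above ("flush TestAcidGuarantees"). It uses the standard HBase Admin API and is not code taken from this test.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
            // Asks the master to flush every region of the table; in this log that request
            // shows up as a FlushTableProcedure with FlushRegionProcedure subprocedures.
            admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}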
2024-11-23T15:25:33,784 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=177 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/A/bc19e12c8f644bcea03932e4830a60b9 2024-11-23T15:25:33,792 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/B/8c8a2a64a8d04ff4b196fb0a46aeff33 is 50, key is test_row_0/B:col10/1732375532800/Put/seqid=0 2024-11-23T15:25:33,801 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742073_1249 (size=12151) 2024-11-23T15:25:33,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(8581): Flush requested on 9d175c2ab5829c897c27b4bef55dd393 2024-11-23T15:25:33,916 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393. as already flushing 2024-11-23T15:25:33,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-11-23T15:25:33,935 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:33,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43642 deadline: 1732375593932, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:33,937 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:33,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43624 deadline: 1732375593933, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:33,937 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:33,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43598 deadline: 1732375593933, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:33,937 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:33,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43626 deadline: 1732375593934, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:33,938 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:33,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43596 deadline: 1732375593935, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:34,036 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:34,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43642 deadline: 1732375594036, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:34,039 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:34,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43624 deadline: 1732375594038, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:34,039 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:34,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43598 deadline: 1732375594038, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:34,040 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:34,040 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:34,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43596 deadline: 1732375594039, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:34,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43626 deadline: 1732375594039, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:34,202 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=177 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/B/8c8a2a64a8d04ff4b196fb0a46aeff33 2024-11-23T15:25:34,213 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/C/6a7ee4a7436e43989affdd155ba6f8bb is 50, key is test_row_0/C:col10/1732375532800/Put/seqid=0 2024-11-23T15:25:34,221 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742074_1250 (size=12151) 2024-11-23T15:25:34,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-11-23T15:25:34,222 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=177 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/C/6a7ee4a7436e43989affdd155ba6f8bb 2024-11-23T15:25:34,228 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/A/bc19e12c8f644bcea03932e4830a60b9 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/A/bc19e12c8f644bcea03932e4830a60b9 2024-11-23T15:25:34,232 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/A/bc19e12c8f644bcea03932e4830a60b9, entries=150, sequenceid=177, filesize=11.9 K 2024-11-23T15:25:34,233 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/B/8c8a2a64a8d04ff4b196fb0a46aeff33 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/B/8c8a2a64a8d04ff4b196fb0a46aeff33 2024-11-23T15:25:34,237 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/B/8c8a2a64a8d04ff4b196fb0a46aeff33, entries=150, sequenceid=177, filesize=11.9 K 2024-11-23T15:25:34,238 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/C/6a7ee4a7436e43989affdd155ba6f8bb as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/C/6a7ee4a7436e43989affdd155ba6f8bb 2024-11-23T15:25:34,240 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:34,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43642 deadline: 1732375594238, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:34,243 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:34,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43624 deadline: 1732375594241, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:34,243 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:34,243 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/C/6a7ee4a7436e43989affdd155ba6f8bb, entries=150, sequenceid=177, filesize=11.9 K 2024-11-23T15:25:34,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43598 deadline: 1732375594241, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:34,244 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 9d175c2ab5829c897c27b4bef55dd393 in 471ms, sequenceid=177, compaction requested=true 2024-11-23T15:25:34,244 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2538): Flush status journal for 9d175c2ab5829c897c27b4bef55dd393: 2024-11-23T15:25:34,244 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393. 
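The dominant pattern above is the write-blocking path: HRegion.checkResources throws RegionTooBusyException ("Over memstore limit=512.0 K") while the flush of stores A, B and C drains the memstore, and each rejected Mutate is then logged by CallRunner with its callId and connection. The blocking threshold is derived from hbase.hregion.memstore.flush.size times hbase.hregion.memstore.block.multiplier, so the tiny 512 K figure suggests this test deliberately shrinks the flush size to provoke the condition. The sketch below is a minimal, hedged illustration of how a client could absorb these rejections with its own backoff; the table, row, family and qualifier names are taken from the log, while the helper putWithBackoff and its retry policy are assumptions, and in a default deployment the HBase client's built-in retries may surface the exception only after they are exhausted.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BackoffPutSketch {

    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Blocking threshold seen in the log ("Over memstore limit=512.0 K") is
        // hbase.hregion.memstore.flush.size * hbase.hregion.memstore.block.multiplier;
        // the defaults (128 MB * 4) would give 512 MB, so the test evidently uses a
        // much smaller flush size. Left commented out here on purpose.
        // conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
        // conf.setInt("hbase.hregion.memstore.block.multiplier", 4);

        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            // Row/family/qualifier mirror the keys in the log (test_row_0, family A, col10).
            Put put = new Put(Bytes.toBytes("test_row_0"))
                .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
            putWithBackoff(table, put, 5);
        }
    }

    // Retry a put with exponential backoff when the region rejects the write because
    // its memstore is over the blocking limit (the WARNs from HRegion.checkResources).
    static void putWithBackoff(Table table, Put put, int maxAttempts)
            throws IOException, InterruptedException {
        long sleepMs = 100;
        for (int attempt = 1; ; attempt++) {
            try {
                table.put(put);
                return;
            } catch (RegionTooBusyException e) {
                if (attempt >= maxAttempts) {
                    throw e;
                }
                // Give the flush that is visible in the log time to drain the memstore.
                Thread.sleep(sleepMs);
                sleepMs = Math.min(sleepMs * 2, 5_000);
            }
        }
    }
}

Backing off in the writer, rather than raising the blocking multiplier, keeps memory pressure on the region server bounded, which is presumably the behaviour this acid-guarantees test is exercising.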
2024-11-23T15:25:34,244 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=82 2024-11-23T15:25:34,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(8581): Flush requested on 9d175c2ab5829c897c27b4bef55dd393 2024-11-23T15:25:34,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4106): Remote procedure done, pid=82 2024-11-23T15:25:34,245 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 9d175c2ab5829c897c27b4bef55dd393 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-11-23T15:25:34,247 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9d175c2ab5829c897c27b4bef55dd393, store=A 2024-11-23T15:25:34,247 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:25:34,247 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9d175c2ab5829c897c27b4bef55dd393, store=B 2024-11-23T15:25:34,247 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:25:34,247 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9d175c2ab5829c897c27b4bef55dd393, store=C 2024-11-23T15:25:34,247 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:25:34,248 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=82, resume processing ppid=81 2024-11-23T15:25:34,248 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=82, ppid=81, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 625 msec 2024-11-23T15:25:34,250 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=81, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=81, table=TestAcidGuarantees in 631 msec 2024-11-23T15:25:34,253 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/A/14acc8db6f934ef98fb7f2cfc7af96eb is 50, key is test_row_0/A:col10/1732375534243/Put/seqid=0 2024-11-23T15:25:34,262 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:34,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43596 deadline: 1732375594260, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:34,263 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:34,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43626 deadline: 1732375594261, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:34,267 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742075_1251 (size=14541) 2024-11-23T15:25:34,365 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:34,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43596 deadline: 1732375594364, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:34,366 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:34,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43626 deadline: 1732375594364, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:34,543 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:34,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43642 deadline: 1732375594542, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:34,545 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:34,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43624 deadline: 1732375594545, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:34,546 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:34,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43598 deadline: 1732375594545, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:34,567 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:34,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43626 deadline: 1732375594567, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:34,569 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:34,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43596 deadline: 1732375594567, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:34,668 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=204 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/A/14acc8db6f934ef98fb7f2cfc7af96eb 2024-11-23T15:25:34,675 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/B/eb1d749540f145f0af34a381118faabb is 50, key is test_row_0/B:col10/1732375534243/Put/seqid=0 2024-11-23T15:25:34,679 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742076_1252 (size=12151) 2024-11-23T15:25:34,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-11-23T15:25:34,723 INFO [Thread-1019 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 81 completed 2024-11-23T15:25:34,724 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-23T15:25:34,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] procedure2.ProcedureExecutor(1098): Stored pid=83, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=83, table=TestAcidGuarantees 2024-11-23T15:25:34,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-11-23T15:25:34,726 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=83, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=83, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-23T15:25:34,726 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=83, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=83, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-23T15:25:34,727 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=84, ppid=83, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 
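At this point the first flush (pid=82) has completed, the MemStoreFlusher has immediately begun a second flush of all three CompactingMemStores, and a new client-requested flush has been stored as FlushTableProcedure pid=83 with a FlushRegionProcedure subprocedure pid=84. The following is a hedged sketch of the client side of those entries: Admin.flush drives the FlushTableProcedure, and in-memory compaction (the CompactingMemStore messages) is a per-column-family setting. Only the table and family names come from the log; whether the test actually enables BASIC in-memory compaction this way is an assumption, since it can also be set cluster-wide through configuration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.MemoryCompactionPolicy;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class FlushAndCompactingMemStoreSketch {

    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            TableName table = TableName.valueOf("TestAcidGuarantees");

            // Client-requested flush: the master stores a FlushTableProcedure (pid=83 above)
            // and fans out a FlushRegionProcedure per region (pid=84).
            admin.flush(table);

            // The "CompactingMemStore ... FLUSHING TO DISK" lines imply in-memory compaction
            // is active for stores A, B and C; it can be configured per column family.
            TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(table);
            for (String family : new String[] { "A", "B", "C" }) {
                builder.setColumnFamily(
                    ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family))
                        .setInMemoryCompaction(MemoryCompactionPolicy.BASIC)
                        .build());
            }
            // Illustrative only: applying this to a live table would also need the
            // table's other existing settings carried over.
            // admin.modifyTable(builder.build());
        }
    }
}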
2024-11-23T15:25:34,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-11-23T15:25:34,871 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:34,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43626 deadline: 1732375594870, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:34,872 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:34,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43596 deadline: 1732375594871, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:34,878 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:25:34,878 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=84 2024-11-23T15:25:34,879 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393. 2024-11-23T15:25:34,879 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393. as already flushing 2024-11-23T15:25:34,879 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393. 2024-11-23T15:25:34,879 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] handler.RSProcedureHandler(58): pid=84 java.io.IOException: Unable to complete flush {ENCODED => 9d175c2ab5829c897c27b4bef55dd393, NAME => 'TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
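The ERROR just above is expected in this situation: the FlushRegionCallable for pid=84 finds the region "already flushing" (the MemStoreFlusher run that started at 15:25:34,245), reports "Unable to complete flush" back to the master, and the master re-dispatches the procedure until it can run, as the repeated "Executing remote procedure ... pid=84" entries that follow show. From a client's point of view that retry loop is invisible; the flush call simply takes longer. The small sketch below, assuming a default client configuration, uses the async admin API to make the contrast with the synchronous polling visible at MasterRpcServices(1305) explicit.

import java.util.concurrent.CompletableFuture;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.AsyncAdmin;
import org.apache.hadoop.hbase.client.AsyncConnection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class AsyncFlushSketch {

    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (AsyncConnection conn = ConnectionFactory.createAsyncConnection(conf).get()) {
            AsyncAdmin admin = conn.getAdmin();

            // The blocking Admin.flush(...) used earlier polls the master
            // ("Checking to see if procedure is done pid=83") until the FlushTableProcedure
            // completes; the async admin hands back a future instead.
            CompletableFuture<Void> flush = admin.flush(TableName.valueOf("TestAcidGuarantees"));

            // While the region is "already flushing", the region server rejects the
            // FlushRegionCallable (the IOException for pid=84 above) and the master
            // re-dispatches it; callers only see a longer wait, not a failure.
            flush.join();
        }
    }
}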
2024-11-23T15:25:34,879 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=84 java.io.IOException: Unable to complete flush {ENCODED => 9d175c2ab5829c897c27b4bef55dd393, NAME => 'TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:25:34,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=84 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9d175c2ab5829c897c27b4bef55dd393, NAME => 'TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9d175c2ab5829c897c27b4bef55dd393, NAME => 'TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:25:35,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-11-23T15:25:35,030 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:25:35,031 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=84 2024-11-23T15:25:35,031 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393. 2024-11-23T15:25:35,031 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393. as already flushing 2024-11-23T15:25:35,031 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393. 2024-11-23T15:25:35,031 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] handler.RSProcedureHandler(58): pid=84 java.io.IOException: Unable to complete flush {ENCODED => 9d175c2ab5829c897c27b4bef55dd393, NAME => 'TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:25:35,031 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=84 java.io.IOException: Unable to complete flush {ENCODED => 9d175c2ab5829c897c27b4bef55dd393, NAME => 'TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:25:35,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=84 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9d175c2ab5829c897c27b4bef55dd393, NAME => 'TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9d175c2ab5829c897c27b4bef55dd393, NAME => 'TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:25:35,045 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:35,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43642 deadline: 1732375595044, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:35,052 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:35,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43624 deadline: 1732375595051, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:35,052 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:35,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43598 deadline: 1732375595051, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:35,080 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=204 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/B/eb1d749540f145f0af34a381118faabb 2024-11-23T15:25:35,089 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/C/9cf24856301b4f2184f945a9996a1439 is 50, key is test_row_0/C:col10/1732375534243/Put/seqid=0 2024-11-23T15:25:35,107 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742077_1253 (size=12151) 2024-11-23T15:25:35,183 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:25:35,184 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=84 2024-11-23T15:25:35,184 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393. 2024-11-23T15:25:35,184 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393. as already flushing 2024-11-23T15:25:35,184 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393. 2024-11-23T15:25:35,184 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] handler.RSProcedureHandler(58): pid=84 java.io.IOException: Unable to complete flush {ENCODED => 9d175c2ab5829c897c27b4bef55dd393, NAME => 'TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:25:35,184 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=84 java.io.IOException: Unable to complete flush {ENCODED => 9d175c2ab5829c897c27b4bef55dd393, NAME => 'TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:25:35,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=84 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9d175c2ab5829c897c27b4bef55dd393, NAME => 'TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9d175c2ab5829c897c27b4bef55dd393, NAME => 'TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:25:35,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-11-23T15:25:35,338 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:25:35,339 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=84 2024-11-23T15:25:35,339 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393. 2024-11-23T15:25:35,339 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393. as already flushing 2024-11-23T15:25:35,339 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393. 2024-11-23T15:25:35,339 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] handler.RSProcedureHandler(58): pid=84 java.io.IOException: Unable to complete flush {ENCODED => 9d175c2ab5829c897c27b4bef55dd393, NAME => 'TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:25:35,340 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=84 java.io.IOException: Unable to complete flush {ENCODED => 9d175c2ab5829c897c27b4bef55dd393, NAME => 'TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:25:35,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=84 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9d175c2ab5829c897c27b4bef55dd393, NAME => 'TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9d175c2ab5829c897c27b4bef55dd393, NAME => 'TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:25:35,374 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:35,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43596 deadline: 1732375595373, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:35,376 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:35,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43626 deadline: 1732375595375, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:35,492 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:25:35,492 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=84 2024-11-23T15:25:35,492 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393. 
2024-11-23T15:25:35,492 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393. as already flushing 2024-11-23T15:25:35,493 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393. 2024-11-23T15:25:35,493 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] handler.RSProcedureHandler(58): pid=84 java.io.IOException: Unable to complete flush {ENCODED => 9d175c2ab5829c897c27b4bef55dd393, NAME => 'TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:25:35,493 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=84 java.io.IOException: Unable to complete flush {ENCODED => 9d175c2ab5829c897c27b4bef55dd393, NAME => 'TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T15:25:35,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=84 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9d175c2ab5829c897c27b4bef55dd393, NAME => 'TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9d175c2ab5829c897c27b4bef55dd393, NAME => 'TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T15:25:35,508 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=204 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/C/9cf24856301b4f2184f945a9996a1439 2024-11-23T15:25:35,512 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/A/14acc8db6f934ef98fb7f2cfc7af96eb as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/A/14acc8db6f934ef98fb7f2cfc7af96eb 2024-11-23T15:25:35,516 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/A/14acc8db6f934ef98fb7f2cfc7af96eb, entries=200, sequenceid=204, filesize=14.2 K 2024-11-23T15:25:35,517 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/B/eb1d749540f145f0af34a381118faabb as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/B/eb1d749540f145f0af34a381118faabb 2024-11-23T15:25:35,521 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/B/eb1d749540f145f0af34a381118faabb, entries=150, sequenceid=204, filesize=11.9 K 2024-11-23T15:25:35,522 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/C/9cf24856301b4f2184f945a9996a1439 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/C/9cf24856301b4f2184f945a9996a1439 2024-11-23T15:25:35,526 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/C/9cf24856301b4f2184f945a9996a1439, entries=150, sequenceid=204, filesize=11.9 K 2024-11-23T15:25:35,527 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=40.25 KB/41220 for 9d175c2ab5829c897c27b4bef55dd393 in 1282ms, sequenceid=204, compaction requested=true 2024-11-23T15:25:35,527 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 9d175c2ab5829c897c27b4bef55dd393: 2024-11-23T15:25:35,527 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9d175c2ab5829c897c27b4bef55dd393:A, priority=-2147483648, current under compaction store size is 1 2024-11-23T15:25:35,527 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T15:25:35,527 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9d175c2ab5829c897c27b4bef55dd393:B, priority=-2147483648, current under compaction store size is 2 2024-11-23T15:25:35,527 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T15:25:35,527 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9d175c2ab5829c897c27b4bef55dd393:C, priority=-2147483648, current under compaction store size is 3 2024-11-23T15:25:35,527 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T15:25:35,527 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-23T15:25:35,527 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-23T15:25:35,529 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48912 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-23T15:25:35,529 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 51302 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-23T15:25:35,529 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1540): 9d175c2ab5829c897c27b4bef55dd393/B is initiating minor compaction (all files) 2024-11-23T15:25:35,529 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HStore(1540): 9d175c2ab5829c897c27b4bef55dd393/A is initiating minor compaction (all files) 2024-11-23T15:25:35,529 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9d175c2ab5829c897c27b4bef55dd393/B in TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393. 2024-11-23T15:25:35,529 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9d175c2ab5829c897c27b4bef55dd393/A in TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393. 
2024-11-23T15:25:35,529 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/B/24708821b2034c5383a793e312eea481, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/B/b4cbeda552974e8dabdeb0448f380148, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/B/8c8a2a64a8d04ff4b196fb0a46aeff33, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/B/eb1d749540f145f0af34a381118faabb] into tmpdir=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp, totalSize=47.8 K 2024-11-23T15:25:35,529 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/A/32bbf6f6eb3843188f49c3a7433f47a8, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/A/2fed2d6d0b2e449f8725510f6bb5ba57, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/A/bc19e12c8f644bcea03932e4830a60b9, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/A/14acc8db6f934ef98fb7f2cfc7af96eb] into tmpdir=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp, totalSize=50.1 K 2024-11-23T15:25:35,530 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting 24708821b2034c5383a793e312eea481, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=139, earliestPutTs=1732375531654 2024-11-23T15:25:35,530 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 32bbf6f6eb3843188f49c3a7433f47a8, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=139, earliestPutTs=1732375531654 2024-11-23T15:25:35,530 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting b4cbeda552974e8dabdeb0448f380148, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=164, earliestPutTs=1732375531678 2024-11-23T15:25:35,530 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2fed2d6d0b2e449f8725510f6bb5ba57, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=164, earliestPutTs=1732375531678 2024-11-23T15:25:35,530 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting 8c8a2a64a8d04ff4b196fb0a46aeff33, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=177, earliestPutTs=1732375532799 2024-11-23T15:25:35,531 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting 
eb1d749540f145f0af34a381118faabb, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=204, earliestPutTs=1732375533933 2024-11-23T15:25:35,531 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.Compactor(224): Compacting bc19e12c8f644bcea03932e4830a60b9, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=177, earliestPutTs=1732375532799 2024-11-23T15:25:35,531 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 14acc8db6f934ef98fb7f2cfc7af96eb, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=204, earliestPutTs=1732375533932 2024-11-23T15:25:35,541 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9d175c2ab5829c897c27b4bef55dd393#A#compaction#213 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-23T15:25:35,541 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9d175c2ab5829c897c27b4bef55dd393#B#compaction#212 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T15:25:35,542 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/A/635e4ceba9af4ec6b7975dc9f40ae841 is 50, key is test_row_0/A:col10/1732375534243/Put/seqid=0 2024-11-23T15:25:35,542 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/B/988eae2626af4a68b3e6ffe279fb02ec is 50, key is test_row_0/B:col10/1732375534243/Put/seqid=0 2024-11-23T15:25:35,557 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742079_1255 (size=12595) 2024-11-23T15:25:35,558 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742078_1254 (size=12595) 2024-11-23T15:25:35,563 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/A/635e4ceba9af4ec6b7975dc9f40ae841 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/A/635e4ceba9af4ec6b7975dc9f40ae841 2024-11-23T15:25:35,568 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 9d175c2ab5829c897c27b4bef55dd393/A of 9d175c2ab5829c897c27b4bef55dd393 into 635e4ceba9af4ec6b7975dc9f40ae841(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T15:25:35,568 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9d175c2ab5829c897c27b4bef55dd393: 2024-11-23T15:25:35,568 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393., storeName=9d175c2ab5829c897c27b4bef55dd393/A, priority=12, startTime=1732375535527; duration=0sec 2024-11-23T15:25:35,568 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T15:25:35,568 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9d175c2ab5829c897c27b4bef55dd393:A 2024-11-23T15:25:35,568 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 5 store files, 0 compacting, 5 eligible, 16 blocking 2024-11-23T15:25:35,570 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 5 files of size 60845 starting at candidate #0 after considering 6 permutations with 6 in ratio 2024-11-23T15:25:35,570 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HStore(1540): 9d175c2ab5829c897c27b4bef55dd393/C is initiating minor compaction (all files) 2024-11-23T15:25:35,570 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9d175c2ab5829c897c27b4bef55dd393/C in TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393. 2024-11-23T15:25:35,570 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/C/97c39e6784a64f43a34279a8a1795576, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/C/70e4bb4f9c864257a6b86d3f21cb1ae2, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/C/29973944ec6b496589f5f334bd48a8d5, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/C/6a7ee4a7436e43989affdd155ba6f8bb, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/C/9cf24856301b4f2184f945a9996a1439] into tmpdir=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp, totalSize=59.4 K 2024-11-23T15:25:35,570 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 97c39e6784a64f43a34279a8a1795576, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=124, earliestPutTs=1732375529527 2024-11-23T15:25:35,571 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 70e4bb4f9c864257a6b86d3f21cb1ae2, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=139, earliestPutTs=1732375531654 
2024-11-23T15:25:35,571 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 29973944ec6b496589f5f334bd48a8d5, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=164, earliestPutTs=1732375531678 2024-11-23T15:25:35,571 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6a7ee4a7436e43989affdd155ba6f8bb, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=177, earliestPutTs=1732375532799 2024-11-23T15:25:35,572 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9cf24856301b4f2184f945a9996a1439, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=204, earliestPutTs=1732375533933 2024-11-23T15:25:35,583 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9d175c2ab5829c897c27b4bef55dd393#C#compaction#214 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T15:25:35,584 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/C/f20ead99a46a4620b172f445d6faa565 is 50, key is test_row_0/C:col10/1732375534243/Put/seqid=0 2024-11-23T15:25:35,597 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742080_1256 (size=12561) 2024-11-23T15:25:35,647 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:25:35,648 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=84 2024-11-23T15:25:35,648 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393. 
2024-11-23T15:25:35,648 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2837): Flushing 9d175c2ab5829c897c27b4bef55dd393 3/3 column families, dataSize=40.25 KB heapSize=106.22 KB 2024-11-23T15:25:35,649 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9d175c2ab5829c897c27b4bef55dd393, store=A 2024-11-23T15:25:35,649 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:25:35,649 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9d175c2ab5829c897c27b4bef55dd393, store=B 2024-11-23T15:25:35,649 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:25:35,649 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9d175c2ab5829c897c27b4bef55dd393, store=C 2024-11-23T15:25:35,649 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:25:35,654 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/A/d16e211eb5f34fb884225d5d17da3a6d is 50, key is test_row_0/A:col10/1732375534252/Put/seqid=0 2024-11-23T15:25:35,659 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742081_1257 (size=12151) 2024-11-23T15:25:35,660 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=214 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/A/d16e211eb5f34fb884225d5d17da3a6d 2024-11-23T15:25:35,668 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/B/ff01e6cd65b84bab88f0c1d0d4d61307 is 50, key is test_row_0/B:col10/1732375534252/Put/seqid=0 2024-11-23T15:25:35,674 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742082_1258 (size=12151) 2024-11-23T15:25:35,675 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=214 (bloomFilter=true), 
to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/B/ff01e6cd65b84bab88f0c1d0d4d61307 2024-11-23T15:25:35,683 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/C/9fb15d5ef3864ef599eb3305a5476fd3 is 50, key is test_row_0/C:col10/1732375534252/Put/seqid=0 2024-11-23T15:25:35,700 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742083_1259 (size=12151) 2024-11-23T15:25:35,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-11-23T15:25:35,964 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/B/988eae2626af4a68b3e6ffe279fb02ec as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/B/988eae2626af4a68b3e6ffe279fb02ec 2024-11-23T15:25:35,969 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 9d175c2ab5829c897c27b4bef55dd393/B of 9d175c2ab5829c897c27b4bef55dd393 into 988eae2626af4a68b3e6ffe279fb02ec(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T15:25:35,969 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9d175c2ab5829c897c27b4bef55dd393: 2024-11-23T15:25:35,969 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393., storeName=9d175c2ab5829c897c27b4bef55dd393/B, priority=12, startTime=1732375535527; duration=0sec 2024-11-23T15:25:35,969 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T15:25:35,969 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9d175c2ab5829c897c27b4bef55dd393:B 2024-11-23T15:25:36,005 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/C/f20ead99a46a4620b172f445d6faa565 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/C/f20ead99a46a4620b172f445d6faa565 2024-11-23T15:25:36,010 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 5 (all) file(s) in 9d175c2ab5829c897c27b4bef55dd393/C of 9d175c2ab5829c897c27b4bef55dd393 into f20ead99a46a4620b172f445d6faa565(size=12.3 K), total size for store is 12.3 K. 
This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T15:25:36,010 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9d175c2ab5829c897c27b4bef55dd393: 2024-11-23T15:25:36,010 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393., storeName=9d175c2ab5829c897c27b4bef55dd393/C, priority=11, startTime=1732375535527; duration=0sec 2024-11-23T15:25:36,010 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T15:25:36,010 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9d175c2ab5829c897c27b4bef55dd393:C 2024-11-23T15:25:36,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(8581): Flush requested on 9d175c2ab5829c897c27b4bef55dd393 2024-11-23T15:25:36,058 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393. as already flushing 2024-11-23T15:25:36,083 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:36,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43642 deadline: 1732375596081, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:36,083 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:36,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43598 deadline: 1732375596082, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:36,085 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:36,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43624 deadline: 1732375596083, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:36,093 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=214 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/C/9fb15d5ef3864ef599eb3305a5476fd3 2024-11-23T15:25:36,098 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/A/d16e211eb5f34fb884225d5d17da3a6d as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/A/d16e211eb5f34fb884225d5d17da3a6d 2024-11-23T15:25:36,103 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/A/d16e211eb5f34fb884225d5d17da3a6d, entries=150, sequenceid=214, filesize=11.9 K 2024-11-23T15:25:36,103 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/B/ff01e6cd65b84bab88f0c1d0d4d61307 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/B/ff01e6cd65b84bab88f0c1d0d4d61307 2024-11-23T15:25:36,108 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/B/ff01e6cd65b84bab88f0c1d0d4d61307, entries=150, sequenceid=214, filesize=11.9 K 2024-11-23T15:25:36,109 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/C/9fb15d5ef3864ef599eb3305a5476fd3 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/C/9fb15d5ef3864ef599eb3305a5476fd3 2024-11-23T15:25:36,113 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/C/9fb15d5ef3864ef599eb3305a5476fd3, entries=150, sequenceid=214, filesize=11.9 K 2024-11-23T15:25:36,113 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(3040): Finished flush of dataSize ~40.25 KB/41220, heapSize ~106.17 KB/108720, currentSize=161.02 KB/164880 for 9d175c2ab5829c897c27b4bef55dd393 in 465ms, sequenceid=214, compaction requested=false 2024-11-23T15:25:36,114 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2538): Flush status journal for 9d175c2ab5829c897c27b4bef55dd393: 2024-11-23T15:25:36,114 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393. 2024-11-23T15:25:36,114 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=84 2024-11-23T15:25:36,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4106): Remote procedure done, pid=84 2024-11-23T15:25:36,116 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=84, resume processing ppid=83 2024-11-23T15:25:36,116 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=84, ppid=83, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.3880 sec 2024-11-23T15:25:36,118 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=83, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=83, table=TestAcidGuarantees in 1.3930 sec 2024-11-23T15:25:36,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(8581): Flush requested on 9d175c2ab5829c897c27b4bef55dd393 2024-11-23T15:25:36,186 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 9d175c2ab5829c897c27b4bef55dd393 3/3 column families, dataSize=167.72 KB heapSize=440.20 KB 2024-11-23T15:25:36,187 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9d175c2ab5829c897c27b4bef55dd393, store=A 2024-11-23T15:25:36,187 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:25:36,187 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9d175c2ab5829c897c27b4bef55dd393, store=B 2024-11-23T15:25:36,187 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:25:36,187 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
9d175c2ab5829c897c27b4bef55dd393, store=C 2024-11-23T15:25:36,187 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:25:36,192 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/A/fec9ca9a317a419bbb3e91e1dfd75cbc is 50, key is test_row_0/A:col10/1732375536185/Put/seqid=0 2024-11-23T15:25:36,193 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:36,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43642 deadline: 1732375596190, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:36,194 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:36,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43624 deadline: 1732375596193, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:36,195 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:36,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43598 deadline: 1732375596193, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:36,197 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742084_1260 (size=12151) 2024-11-23T15:25:36,198 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=245 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/A/fec9ca9a317a419bbb3e91e1dfd75cbc 2024-11-23T15:25:36,212 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/B/c6cfaf421727447285d7c06b37807313 is 50, key is test_row_0/B:col10/1732375536185/Put/seqid=0 2024-11-23T15:25:36,238 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742085_1261 (size=12151) 2024-11-23T15:25:36,239 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=245 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/B/c6cfaf421727447285d7c06b37807313 2024-11-23T15:25:36,254 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/C/056c93d9f50942e8a697a059def46fd0 is 50, key is test_row_0/C:col10/1732375536185/Put/seqid=0 2024-11-23T15:25:36,262 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742086_1262 (size=12151) 2024-11-23T15:25:36,263 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=245 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/C/056c93d9f50942e8a697a059def46fd0 2024-11-23T15:25:36,270 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/A/fec9ca9a317a419bbb3e91e1dfd75cbc as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/A/fec9ca9a317a419bbb3e91e1dfd75cbc 2024-11-23T15:25:36,278 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/A/fec9ca9a317a419bbb3e91e1dfd75cbc, entries=150, sequenceid=245, filesize=11.9 K 2024-11-23T15:25:36,279 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/B/c6cfaf421727447285d7c06b37807313 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/B/c6cfaf421727447285d7c06b37807313 2024-11-23T15:25:36,286 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/B/c6cfaf421727447285d7c06b37807313, entries=150, sequenceid=245, filesize=11.9 K 2024-11-23T15:25:36,287 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/C/056c93d9f50942e8a697a059def46fd0 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/C/056c93d9f50942e8a697a059def46fd0 2024-11-23T15:25:36,292 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/C/056c93d9f50942e8a697a059def46fd0, entries=150, sequenceid=245, filesize=11.9 K 2024-11-23T15:25:36,293 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~174.43 KB/178620, heapSize ~457.73 KB/468720, currentSize=33.54 KB/34350 for 9d175c2ab5829c897c27b4bef55dd393 in 107ms, sequenceid=245, compaction requested=true 2024-11-23T15:25:36,293 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 9d175c2ab5829c897c27b4bef55dd393: 2024-11-23T15:25:36,293 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9d175c2ab5829c897c27b4bef55dd393:A, priority=-2147483648, current under compaction store size is 1 2024-11-23T15:25:36,293 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T15:25:36,293 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9d175c2ab5829c897c27b4bef55dd393:B, priority=-2147483648, current under compaction store size is 2 2024-11-23T15:25:36,293 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T15:25:36,293 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T15:25:36,294 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T15:25:36,294 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9d175c2ab5829c897c27b4bef55dd393:C, priority=-2147483648, current under compaction store size is 3 2024-11-23T15:25:36,295 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36897 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T15:25:36,295 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T15:25:36,295 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HStore(1540): 9d175c2ab5829c897c27b4bef55dd393/A is initiating minor compaction (all files) 2024-11-23T15:25:36,295 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9d175c2ab5829c897c27b4bef55dd393/A in TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393. 2024-11-23T15:25:36,295 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/A/635e4ceba9af4ec6b7975dc9f40ae841, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/A/d16e211eb5f34fb884225d5d17da3a6d, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/A/fec9ca9a317a419bbb3e91e1dfd75cbc] into tmpdir=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp, totalSize=36.0 K 2024-11-23T15:25:36,296 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36897 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T15:25:36,296 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1540): 9d175c2ab5829c897c27b4bef55dd393/B is initiating minor compaction (all files) 2024-11-23T15:25:36,296 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9d175c2ab5829c897c27b4bef55dd393/B in TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393. 
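[Editor's note] The selection traced above ("Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking", with ExploringCompactionPolicy picking all 3 files) is driven by a handful of store-level settings. The sketch below shows, hypothetically, how a test or client Configuration might pin them down; the property names are standard HBase keys, but the values are illustrative and not taken from this test.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionTuningSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Minimum/maximum number of store files considered for one minor compaction;
    // the log above shows 3 eligible files being selected in a single pass.
    conf.setInt("hbase.hstore.compaction.min", 3);
    conf.setInt("hbase.hstore.compaction.max", 10);
    // Updates to a store with at least this many files are blocked until compaction
    // catches up; matches the "16 blocking" figure reported by SortedCompactionPolicy.
    conf.setInt("hbase.hstore.blockingStoreFiles", 16);
    // Thread pools behind the "shortCompactions" / "longCompactions" workers in the log.
    conf.setInt("hbase.regionserver.thread.compaction.small", 1);
    conf.setInt("hbase.regionserver.thread.compaction.large", 1);
    System.out.println("compaction.min = " + conf.getInt("hbase.hstore.compaction.min", -1));
  }
}

With one thread in each pool, A/B/C store compactions for the same region queue up behind each other, which is consistent with the alternating shortCompactions-0 / longCompactions-0 workers seen throughout this run.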
2024-11-23T15:25:36,297 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/B/988eae2626af4a68b3e6ffe279fb02ec, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/B/ff01e6cd65b84bab88f0c1d0d4d61307, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/B/c6cfaf421727447285d7c06b37807313] into tmpdir=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp, totalSize=36.0 K 2024-11-23T15:25:36,297 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 635e4ceba9af4ec6b7975dc9f40ae841, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=204, earliestPutTs=1732375533933 2024-11-23T15:25:36,297 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.Compactor(224): Compacting d16e211eb5f34fb884225d5d17da3a6d, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=214, earliestPutTs=1732375534252 2024-11-23T15:25:36,298 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting 988eae2626af4a68b3e6ffe279fb02ec, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=204, earliestPutTs=1732375533933 2024-11-23T15:25:36,298 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.Compactor(224): Compacting fec9ca9a317a419bbb3e91e1dfd75cbc, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=245, earliestPutTs=1732375536079 2024-11-23T15:25:36,298 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting ff01e6cd65b84bab88f0c1d0d4d61307, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=214, earliestPutTs=1732375534252 2024-11-23T15:25:36,299 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting c6cfaf421727447285d7c06b37807313, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=245, earliestPutTs=1732375536079 2024-11-23T15:25:36,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(8581): Flush requested on 9d175c2ab5829c897c27b4bef55dd393 2024-11-23T15:25:36,302 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 9d175c2ab5829c897c27b4bef55dd393 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-23T15:25:36,303 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9d175c2ab5829c897c27b4bef55dd393, store=A 2024-11-23T15:25:36,303 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:25:36,303 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9d175c2ab5829c897c27b4bef55dd393, store=B 2024-11-23T15:25:36,303 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:25:36,303 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9d175c2ab5829c897c27b4bef55dd393, store=C 
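[Editor's note] The repeated RegionTooBusyException warnings in this stretch ("Over memstore limit=512.0 K") are the region server pushing writers back while flushes and compactions catch up; the 512 K blocking limit is presumably the product of a deliberately small hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier in the test configuration (an assumption, not shown in the log). A well-behaved writer backs off and retries instead of failing outright. The sketch below is a hypothetical application-level retry loop: table, row key, family, and qualifier are copied from the log, while the helper name, backoff policy, and value are invented for illustration. Note that the stock HBase client also retries such failures internally; this only shows explicit handling.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BackoffPutSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      putWithBackoff(table, put, 5);
    }
  }

  // Hypothetical helper: retry a put a few times when the region reports it is too busy,
  // mirroring the "Over memstore limit=512.0 K" rejections logged above and below.
  static void putWithBackoff(Table table, Put put, int maxAttempts) throws Exception {
    long sleepMs = 100;
    for (int attempt = 1; ; attempt++) {
      try {
        table.put(put);
        return;
      } catch (RegionTooBusyException e) {
        if (attempt >= maxAttempts) {
          throw e;
        }
        Thread.sleep(sleepMs);
        sleepMs *= 2; // simple exponential backoff
      }
    }
  }
}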
2024-11-23T15:25:36,303 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:25:36,313 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9d175c2ab5829c897c27b4bef55dd393#A#compaction#221 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T15:25:36,314 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/A/634818014909485f940c4e9f1040b646 is 50, key is test_row_0/A:col10/1732375536185/Put/seqid=0 2024-11-23T15:25:36,316 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9d175c2ab5829c897c27b4bef55dd393#B#compaction#222 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T15:25:36,317 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/B/e7678d040498413fa36b0dc5b313bf79 is 50, key is test_row_0/B:col10/1732375536185/Put/seqid=0 2024-11-23T15:25:36,319 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/A/c902ab83a35b445d93cf6dc5a2933a9e is 50, key is test_row_0/A:col10/1732375536192/Put/seqid=0 2024-11-23T15:25:36,333 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742087_1263 (size=12697) 2024-11-23T15:25:36,341 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/B/e7678d040498413fa36b0dc5b313bf79 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/B/e7678d040498413fa36b0dc5b313bf79 2024-11-23T15:25:36,346 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742088_1264 (size=12697) 2024-11-23T15:25:36,347 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:36,347 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 9d175c2ab5829c897c27b4bef55dd393/B of 9d175c2ab5829c897c27b4bef55dd393 into e7678d040498413fa36b0dc5b313bf79(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T15:25:36,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43598 deadline: 1732375596342, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:36,347 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9d175c2ab5829c897c27b4bef55dd393: 2024-11-23T15:25:36,347 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393., storeName=9d175c2ab5829c897c27b4bef55dd393/B, priority=13, startTime=1732375536293; duration=0sec 2024-11-23T15:25:36,347 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T15:25:36,347 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9d175c2ab5829c897c27b4bef55dd393:B 2024-11-23T15:25:36,347 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T15:25:36,349 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742089_1265 (size=14591) 2024-11-23T15:25:36,349 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=257 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/A/c902ab83a35b445d93cf6dc5a2933a9e 2024-11-23T15:25:36,350 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36863 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T15:25:36,350 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1540): 9d175c2ab5829c897c27b4bef55dd393/C is 
initiating minor compaction (all files) 2024-11-23T15:25:36,350 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9d175c2ab5829c897c27b4bef55dd393/C in TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393. 2024-11-23T15:25:36,350 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/C/f20ead99a46a4620b172f445d6faa565, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/C/9fb15d5ef3864ef599eb3305a5476fd3, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/C/056c93d9f50942e8a697a059def46fd0] into tmpdir=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp, totalSize=36.0 K 2024-11-23T15:25:36,350 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting f20ead99a46a4620b172f445d6faa565, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=204, earliestPutTs=1732375533933 2024-11-23T15:25:36,351 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting 9fb15d5ef3864ef599eb3305a5476fd3, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=214, earliestPutTs=1732375534252 2024-11-23T15:25:36,352 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:36,352 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting 056c93d9f50942e8a697a059def46fd0, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=245, earliestPutTs=1732375536079 2024-11-23T15:25:36,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43642 deadline: 1732375596347, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:36,352 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:36,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43624 deadline: 1732375596348, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:36,358 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/B/184630c51d8d41cf8144cb9cc3c182c7 is 50, key is test_row_0/B:col10/1732375536192/Put/seqid=0 2024-11-23T15:25:36,363 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9d175c2ab5829c897c27b4bef55dd393#C#compaction#225 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T15:25:36,364 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/C/f27fd66f2d5f40e78c4e321b4fedc0c4 is 50, key is test_row_0/C:col10/1732375536185/Put/seqid=0 2024-11-23T15:25:36,369 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742090_1266 (size=12201) 2024-11-23T15:25:36,378 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742091_1267 (size=12663) 2024-11-23T15:25:36,380 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:36,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43596 deadline: 1732375596379, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:36,380 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:36,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43626 deadline: 1732375596379, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:36,383 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/C/f27fd66f2d5f40e78c4e321b4fedc0c4 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/C/f27fd66f2d5f40e78c4e321b4fedc0c4 2024-11-23T15:25:36,389 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 9d175c2ab5829c897c27b4bef55dd393/C of 9d175c2ab5829c897c27b4bef55dd393 into f27fd66f2d5f40e78c4e321b4fedc0c4(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T15:25:36,389 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9d175c2ab5829c897c27b4bef55dd393: 2024-11-23T15:25:36,389 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393., storeName=9d175c2ab5829c897c27b4bef55dd393/C, priority=13, startTime=1732375536293; duration=0sec 2024-11-23T15:25:36,389 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T15:25:36,389 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9d175c2ab5829c897c27b4bef55dd393:C 2024-11-23T15:25:36,449 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:36,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43598 deadline: 1732375596448, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:36,454 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:36,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43642 deadline: 1732375596453, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:36,454 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:36,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43624 deadline: 1732375596453, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:36,651 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:36,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43598 deadline: 1732375596651, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:36,657 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:36,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43624 deadline: 1732375596655, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:36,658 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:36,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43642 deadline: 1732375596656, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:36,751 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/A/634818014909485f940c4e9f1040b646 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/A/634818014909485f940c4e9f1040b646 2024-11-23T15:25:36,757 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 9d175c2ab5829c897c27b4bef55dd393/A of 9d175c2ab5829c897c27b4bef55dd393 into 634818014909485f940c4e9f1040b646(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T15:25:36,757 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9d175c2ab5829c897c27b4bef55dd393: 2024-11-23T15:25:36,757 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393., storeName=9d175c2ab5829c897c27b4bef55dd393/A, priority=13, startTime=1732375536293; duration=0sec 2024-11-23T15:25:36,757 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T15:25:36,757 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9d175c2ab5829c897c27b4bef55dd393:A 2024-11-23T15:25:36,770 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=257 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/B/184630c51d8d41cf8144cb9cc3c182c7 2024-11-23T15:25:36,777 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/C/c71ad14ca7ef4bc48a94ef6a64ea1f9b is 50, key is test_row_0/C:col10/1732375536192/Put/seqid=0 2024-11-23T15:25:36,785 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742092_1268 (size=12201) 2024-11-23T15:25:36,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-11-23T15:25:36,830 INFO [Thread-1019 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 83 completed 2024-11-23T15:25:36,831 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-23T15:25:36,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] procedure2.ProcedureExecutor(1098): Stored pid=85, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=85, table=TestAcidGuarantees 2024-11-23T15:25:36,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-11-23T15:25:36,833 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=85, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=85, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-23T15:25:36,833 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=85, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=85, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-23T15:25:36,833 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=86, ppid=85, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-23T15:25:36,933 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-11-23T15:25:36,953 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:36,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43598 deadline: 1732375596953, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:36,960 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:36,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43642 deadline: 1732375596960, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:36,961 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:36,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43624 deadline: 1732375596960, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:36,985 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:25:36,985 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-11-23T15:25:36,985 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393. 
2024-11-23T15:25:36,985 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393. as already flushing 2024-11-23T15:25:36,985 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393. 2024-11-23T15:25:36,986 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] handler.RSProcedureHandler(58): pid=86 java.io.IOException: Unable to complete flush {ENCODED => 9d175c2ab5829c897c27b4bef55dd393, NAME => 'TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:25:36,986 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=86 java.io.IOException: Unable to complete flush {ENCODED => 9d175c2ab5829c897c27b4bef55dd393, NAME => 'TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T15:25:36,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=86 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9d175c2ab5829c897c27b4bef55dd393, NAME => 'TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9d175c2ab5829c897c27b4bef55dd393, NAME => 'TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:25:37,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-11-23T15:25:37,138 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:25:37,138 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-11-23T15:25:37,138 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393. 
2024-11-23T15:25:37,138 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393. as already flushing 2024-11-23T15:25:37,138 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393. 2024-11-23T15:25:37,138 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] handler.RSProcedureHandler(58): pid=86 java.io.IOException: Unable to complete flush {ENCODED => 9d175c2ab5829c897c27b4bef55dd393, NAME => 'TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:25:37,139 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=86 java.io.IOException: Unable to complete flush {ENCODED => 9d175c2ab5829c897c27b4bef55dd393, NAME => 'TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T15:25:37,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=86 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9d175c2ab5829c897c27b4bef55dd393, NAME => 'TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9d175c2ab5829c897c27b4bef55dd393, NAME => 'TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T15:25:37,186 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=257 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/C/c71ad14ca7ef4bc48a94ef6a64ea1f9b 2024-11-23T15:25:37,191 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/A/c902ab83a35b445d93cf6dc5a2933a9e as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/A/c902ab83a35b445d93cf6dc5a2933a9e 2024-11-23T15:25:37,195 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/A/c902ab83a35b445d93cf6dc5a2933a9e, entries=200, sequenceid=257, filesize=14.2 K 2024-11-23T15:25:37,197 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/B/184630c51d8d41cf8144cb9cc3c182c7 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/B/184630c51d8d41cf8144cb9cc3c182c7 2024-11-23T15:25:37,201 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/B/184630c51d8d41cf8144cb9cc3c182c7, entries=150, sequenceid=257, filesize=11.9 K 2024-11-23T15:25:37,202 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/C/c71ad14ca7ef4bc48a94ef6a64ea1f9b as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/C/c71ad14ca7ef4bc48a94ef6a64ea1f9b 2024-11-23T15:25:37,207 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/C/c71ad14ca7ef4bc48a94ef6a64ea1f9b, entries=150, sequenceid=257, filesize=11.9 K 2024-11-23T15:25:37,208 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=147.60 KB/151140 for 9d175c2ab5829c897c27b4bef55dd393 in 906ms, sequenceid=257, compaction requested=false 2024-11-23T15:25:37,208 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 9d175c2ab5829c897c27b4bef55dd393: 2024-11-23T15:25:37,291 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:25:37,291 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, 
pid=86 2024-11-23T15:25:37,291 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393. 2024-11-23T15:25:37,291 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2837): Flushing 9d175c2ab5829c897c27b4bef55dd393 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-23T15:25:37,292 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9d175c2ab5829c897c27b4bef55dd393, store=A 2024-11-23T15:25:37,292 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:25:37,292 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9d175c2ab5829c897c27b4bef55dd393, store=B 2024-11-23T15:25:37,292 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:25:37,292 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9d175c2ab5829c897c27b4bef55dd393, store=C 2024-11-23T15:25:37,292 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:25:37,297 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/A/2496cdcaeebe4585a426f7619992bd2b is 50, key is test_row_0/A:col10/1732375536339/Put/seqid=0 2024-11-23T15:25:37,302 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742093_1269 (size=12301) 2024-11-23T15:25:37,304 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=285 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/A/2496cdcaeebe4585a426f7619992bd2b 2024-11-23T15:25:37,313 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/B/145148173f4d4a9d9d41144d20db8be8 is 50, key is test_row_0/B:col10/1732375536339/Put/seqid=0 2024-11-23T15:25:37,317 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742094_1270 (size=12301) 2024-11-23T15:25:37,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see 
if procedure is done pid=85 2024-11-23T15:25:37,458 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393. as already flushing 2024-11-23T15:25:37,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(8581): Flush requested on 9d175c2ab5829c897c27b4bef55dd393 2024-11-23T15:25:37,469 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:37,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43642 deadline: 1732375597467, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:37,471 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:37,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43624 deadline: 1732375597469, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:37,471 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:37,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43598 deadline: 1732375597469, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:37,571 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:37,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43642 deadline: 1732375597570, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:37,573 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:37,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43624 deadline: 1732375597572, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:37,574 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:37,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43598 deadline: 1732375597572, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:37,717 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=285 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/B/145148173f4d4a9d9d41144d20db8be8 2024-11-23T15:25:37,726 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/C/a420de9683be4bc9bdd8864a942ee7fa is 50, key is test_row_0/C:col10/1732375536339/Put/seqid=0 2024-11-23T15:25:37,737 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742095_1271 (size=12301) 2024-11-23T15:25:37,773 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:37,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43642 deadline: 1732375597772, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:37,776 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:37,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43624 deadline: 1732375597775, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:37,777 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:37,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43598 deadline: 1732375597776, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:37,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-11-23T15:25:38,077 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:38,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43642 deadline: 1732375598075, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:38,079 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:38,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43624 deadline: 1732375598077, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:38,080 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:38,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43598 deadline: 1732375598078, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:38,139 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=285 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/C/a420de9683be4bc9bdd8864a942ee7fa 2024-11-23T15:25:38,144 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/A/2496cdcaeebe4585a426f7619992bd2b as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/A/2496cdcaeebe4585a426f7619992bd2b 2024-11-23T15:25:38,148 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/A/2496cdcaeebe4585a426f7619992bd2b, entries=150, sequenceid=285, filesize=12.0 K 2024-11-23T15:25:38,149 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/B/145148173f4d4a9d9d41144d20db8be8 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/B/145148173f4d4a9d9d41144d20db8be8 2024-11-23T15:25:38,154 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/B/145148173f4d4a9d9d41144d20db8be8, entries=150, sequenceid=285, filesize=12.0 K 2024-11-23T15:25:38,155 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/C/a420de9683be4bc9bdd8864a942ee7fa as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/C/a420de9683be4bc9bdd8864a942ee7fa 2024-11-23T15:25:38,159 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/C/a420de9683be4bc9bdd8864a942ee7fa, entries=150, sequenceid=285, filesize=12.0 K 2024-11-23T15:25:38,160 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 9d175c2ab5829c897c27b4bef55dd393 in 869ms, sequenceid=285, compaction requested=true 2024-11-23T15:25:38,160 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2538): Flush status journal for 9d175c2ab5829c897c27b4bef55dd393: 2024-11-23T15:25:38,160 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393. 
2024-11-23T15:25:38,160 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=86 2024-11-23T15:25:38,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4106): Remote procedure done, pid=86 2024-11-23T15:25:38,163 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=86, resume processing ppid=85 2024-11-23T15:25:38,163 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=86, ppid=85, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.3280 sec 2024-11-23T15:25:38,164 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=85, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=85, table=TestAcidGuarantees in 1.3320 sec 2024-11-23T15:25:38,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(8581): Flush requested on 9d175c2ab5829c897c27b4bef55dd393 2024-11-23T15:25:38,391 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 9d175c2ab5829c897c27b4bef55dd393 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-23T15:25:38,392 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9d175c2ab5829c897c27b4bef55dd393, store=A 2024-11-23T15:25:38,392 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:25:38,392 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9d175c2ab5829c897c27b4bef55dd393, store=B 2024-11-23T15:25:38,392 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:25:38,392 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9d175c2ab5829c897c27b4bef55dd393, store=C 2024-11-23T15:25:38,392 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:25:38,397 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/A/144a8899ba6e45b392d6e74e13cee065 is 50, key is test_row_0/A:col10/1732375538390/Put/seqid=0 2024-11-23T15:25:38,402 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742096_1272 (size=12301) 2024-11-23T15:25:38,414 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=297 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/A/144a8899ba6e45b392d6e74e13cee065 2024-11-23T15:25:38,430 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/B/b359022cd91d42c4a9f322729673cff0 is 50, key is test_row_0/B:col10/1732375538390/Put/seqid=0 2024-11-23T15:25:38,431 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size 
limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:38,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43626 deadline: 1732375598428, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:38,432 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:38,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43596 deadline: 1732375598429, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:38,442 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742097_1273 (size=12301) 2024-11-23T15:25:38,443 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=297 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/B/b359022cd91d42c4a9f322729673cff0 2024-11-23T15:25:38,452 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/C/0e53fce3930c41e48ef8dba589b52f9d is 50, key is test_row_0/C:col10/1732375538390/Put/seqid=0 2024-11-23T15:25:38,457 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742098_1274 (size=12301) 2024-11-23T15:25:38,458 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=297 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/C/0e53fce3930c41e48ef8dba589b52f9d 2024-11-23T15:25:38,464 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/A/144a8899ba6e45b392d6e74e13cee065 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/A/144a8899ba6e45b392d6e74e13cee065 2024-11-23T15:25:38,468 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/A/144a8899ba6e45b392d6e74e13cee065, entries=150, sequenceid=297, filesize=12.0 K 2024-11-23T15:25:38,470 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/B/b359022cd91d42c4a9f322729673cff0 as 
hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/B/b359022cd91d42c4a9f322729673cff0 2024-11-23T15:25:38,473 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/B/b359022cd91d42c4a9f322729673cff0, entries=150, sequenceid=297, filesize=12.0 K 2024-11-23T15:25:38,474 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/C/0e53fce3930c41e48ef8dba589b52f9d as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/C/0e53fce3930c41e48ef8dba589b52f9d 2024-11-23T15:25:38,478 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/C/0e53fce3930c41e48ef8dba589b52f9d, entries=150, sequenceid=297, filesize=12.0 K 2024-11-23T15:25:38,479 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 9d175c2ab5829c897c27b4bef55dd393 in 88ms, sequenceid=297, compaction requested=true 2024-11-23T15:25:38,479 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 9d175c2ab5829c897c27b4bef55dd393: 2024-11-23T15:25:38,479 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9d175c2ab5829c897c27b4bef55dd393:A, priority=-2147483648, current under compaction store size is 1 2024-11-23T15:25:38,479 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T15:25:38,479 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-23T15:25:38,479 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-23T15:25:38,479 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9d175c2ab5829c897c27b4bef55dd393:B, priority=-2147483648, current under compaction store size is 2 2024-11-23T15:25:38,479 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T15:25:38,479 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9d175c2ab5829c897c27b4bef55dd393:C, priority=-2147483648, current under compaction store size is 3 2024-11-23T15:25:38,479 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T15:25:38,481 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction 
algorithm has selected 4 files of size 51890 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-23T15:25:38,481 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49500 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-23T15:25:38,481 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HStore(1540): 9d175c2ab5829c897c27b4bef55dd393/A is initiating minor compaction (all files) 2024-11-23T15:25:38,481 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1540): 9d175c2ab5829c897c27b4bef55dd393/B is initiating minor compaction (all files) 2024-11-23T15:25:38,481 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9d175c2ab5829c897c27b4bef55dd393/B in TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393. 2024-11-23T15:25:38,482 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9d175c2ab5829c897c27b4bef55dd393/A in TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393. 2024-11-23T15:25:38,482 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/A/634818014909485f940c4e9f1040b646, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/A/c902ab83a35b445d93cf6dc5a2933a9e, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/A/2496cdcaeebe4585a426f7619992bd2b, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/A/144a8899ba6e45b392d6e74e13cee065] into tmpdir=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp, totalSize=50.7 K 2024-11-23T15:25:38,482 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/B/e7678d040498413fa36b0dc5b313bf79, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/B/184630c51d8d41cf8144cb9cc3c182c7, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/B/145148173f4d4a9d9d41144d20db8be8, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/B/b359022cd91d42c4a9f322729673cff0] into tmpdir=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp, totalSize=48.3 K 2024-11-23T15:25:38,482 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting e7678d040498413fa36b0dc5b313bf79, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=245, 
earliestPutTs=1732375536079 2024-11-23T15:25:38,482 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 634818014909485f940c4e9f1040b646, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=245, earliestPutTs=1732375536079 2024-11-23T15:25:38,483 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting 184630c51d8d41cf8144cb9cc3c182c7, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=257, earliestPutTs=1732375536192 2024-11-23T15:25:38,483 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.Compactor(224): Compacting c902ab83a35b445d93cf6dc5a2933a9e, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=257, earliestPutTs=1732375536192 2024-11-23T15:25:38,483 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting 145148173f4d4a9d9d41144d20db8be8, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=285, earliestPutTs=1732375536339 2024-11-23T15:25:38,484 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2496cdcaeebe4585a426f7619992bd2b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=285, earliestPutTs=1732375536339 2024-11-23T15:25:38,484 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting b359022cd91d42c4a9f322729673cff0, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=297, earliestPutTs=1732375537468 2024-11-23T15:25:38,484 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 144a8899ba6e45b392d6e74e13cee065, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=297, earliestPutTs=1732375537468 2024-11-23T15:25:38,496 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9d175c2ab5829c897c27b4bef55dd393#A#compaction#233 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T15:25:38,497 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/A/9a5b12809d764449bf2c06c2c1fc4b7b is 50, key is test_row_0/A:col10/1732375538390/Put/seqid=0 2024-11-23T15:25:38,501 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9d175c2ab5829c897c27b4bef55dd393#B#compaction#234 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T15:25:38,501 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/B/b92bea8c4d654978881121c9b36a9063 is 50, key is test_row_0/B:col10/1732375538390/Put/seqid=0 2024-11-23T15:25:38,507 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742100_1276 (size=12983) 2024-11-23T15:25:38,513 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742099_1275 (size=12983) 2024-11-23T15:25:38,513 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/B/b92bea8c4d654978881121c9b36a9063 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/B/b92bea8c4d654978881121c9b36a9063 2024-11-23T15:25:38,519 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/A/9a5b12809d764449bf2c06c2c1fc4b7b as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/A/9a5b12809d764449bf2c06c2c1fc4b7b 2024-11-23T15:25:38,520 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 9d175c2ab5829c897c27b4bef55dd393/B of 9d175c2ab5829c897c27b4bef55dd393 into b92bea8c4d654978881121c9b36a9063(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T15:25:38,520 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9d175c2ab5829c897c27b4bef55dd393: 2024-11-23T15:25:38,520 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393., storeName=9d175c2ab5829c897c27b4bef55dd393/B, priority=12, startTime=1732375538479; duration=0sec 2024-11-23T15:25:38,520 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T15:25:38,520 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9d175c2ab5829c897c27b4bef55dd393:B 2024-11-23T15:25:38,520 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-23T15:25:38,523 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49466 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-23T15:25:38,523 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1540): 9d175c2ab5829c897c27b4bef55dd393/C is initiating minor compaction (all files) 2024-11-23T15:25:38,523 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9d175c2ab5829c897c27b4bef55dd393/C in TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393. 2024-11-23T15:25:38,523 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/C/f27fd66f2d5f40e78c4e321b4fedc0c4, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/C/c71ad14ca7ef4bc48a94ef6a64ea1f9b, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/C/a420de9683be4bc9bdd8864a942ee7fa, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/C/0e53fce3930c41e48ef8dba589b52f9d] into tmpdir=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp, totalSize=48.3 K 2024-11-23T15:25:38,525 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting f27fd66f2d5f40e78c4e321b4fedc0c4, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=245, earliestPutTs=1732375536079 2024-11-23T15:25:38,527 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 9d175c2ab5829c897c27b4bef55dd393/A of 9d175c2ab5829c897c27b4bef55dd393 into 9a5b12809d764449bf2c06c2c1fc4b7b(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T15:25:38,527 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9d175c2ab5829c897c27b4bef55dd393: 2024-11-23T15:25:38,527 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393., storeName=9d175c2ab5829c897c27b4bef55dd393/A, priority=12, startTime=1732375538479; duration=0sec 2024-11-23T15:25:38,527 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T15:25:38,527 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9d175c2ab5829c897c27b4bef55dd393:A 2024-11-23T15:25:38,528 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting c71ad14ca7ef4bc48a94ef6a64ea1f9b, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=257, earliestPutTs=1732375536192 2024-11-23T15:25:38,528 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting a420de9683be4bc9bdd8864a942ee7fa, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=285, earliestPutTs=1732375536339 2024-11-23T15:25:38,529 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting 0e53fce3930c41e48ef8dba589b52f9d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=297, earliestPutTs=1732375537468 2024-11-23T15:25:38,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(8581): Flush requested on 9d175c2ab5829c897c27b4bef55dd393 2024-11-23T15:25:38,534 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 9d175c2ab5829c897c27b4bef55dd393 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-23T15:25:38,536 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9d175c2ab5829c897c27b4bef55dd393, store=A 2024-11-23T15:25:38,536 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:25:38,536 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9d175c2ab5829c897c27b4bef55dd393, store=B 2024-11-23T15:25:38,536 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:25:38,536 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9d175c2ab5829c897c27b4bef55dd393, store=C 2024-11-23T15:25:38,536 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:25:38,544 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9d175c2ab5829c897c27b4bef55dd393#C#compaction#235 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T15:25:38,545 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/C/2e55a9cd1bdf4280b4075e2519b73f4c is 50, key is test_row_0/C:col10/1732375538390/Put/seqid=0 2024-11-23T15:25:38,547 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/A/03abbc3e731141749396df296f428f07 is 50, key is test_row_0/A:col10/1732375538533/Put/seqid=0 2024-11-23T15:25:38,551 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:38,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43626 deadline: 1732375598549, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:38,552 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:38,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43596 deadline: 1732375598550, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:38,558 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742101_1277 (size=12949) 2024-11-23T15:25:38,571 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742102_1278 (size=12301) 2024-11-23T15:25:38,571 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=325 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/A/03abbc3e731141749396df296f428f07 2024-11-23T15:25:38,580 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/B/ec971a2d645542ef8ecb35dbe969ee71 is 50, key is test_row_0/B:col10/1732375538533/Put/seqid=0 2024-11-23T15:25:38,582 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:38,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43642 deadline: 1732375598580, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:38,583 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:38,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43624 deadline: 1732375598581, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:38,584 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:38,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43598 deadline: 1732375598583, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:38,586 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742103_1279 (size=12301) 2024-11-23T15:25:38,654 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:38,654 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:38,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43626 deadline: 1732375598653, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:38,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43596 deadline: 1732375598653, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:38,856 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:38,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43626 deadline: 1732375598855, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:38,858 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:38,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43596 deadline: 1732375598856, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:38,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-11-23T15:25:38,936 INFO [Thread-1019 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 85 completed 2024-11-23T15:25:38,938 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-23T15:25:38,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] procedure2.ProcedureExecutor(1098): Stored pid=87, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=87, table=TestAcidGuarantees 2024-11-23T15:25:38,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-11-23T15:25:38,939 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=87, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=87, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-23T15:25:38,940 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=87, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=87, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-23T15:25:38,940 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=88, ppid=87, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-23T15:25:38,965 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/C/2e55a9cd1bdf4280b4075e2519b73f4c as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/C/2e55a9cd1bdf4280b4075e2519b73f4c 2024-11-23T15:25:38,970 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 9d175c2ab5829c897c27b4bef55dd393/C of 9d175c2ab5829c897c27b4bef55dd393 into 2e55a9cd1bdf4280b4075e2519b73f4c(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T15:25:38,970 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9d175c2ab5829c897c27b4bef55dd393: 2024-11-23T15:25:38,970 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393., storeName=9d175c2ab5829c897c27b4bef55dd393/C, priority=12, startTime=1732375538479; duration=0sec 2024-11-23T15:25:38,970 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T15:25:38,970 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9d175c2ab5829c897c27b4bef55dd393:C 2024-11-23T15:25:38,986 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=325 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/B/ec971a2d645542ef8ecb35dbe969ee71 2024-11-23T15:25:38,995 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/C/dc38221e8be441bc9a1ec03e2a78405a is 50, key is test_row_0/C:col10/1732375538533/Put/seqid=0 2024-11-23T15:25:39,006 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742104_1280 (size=12301) 2024-11-23T15:25:39,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-11-23T15:25:39,091 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:25:39,092 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=88 2024-11-23T15:25:39,092 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393. 2024-11-23T15:25:39,092 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393. as already flushing 2024-11-23T15:25:39,092 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393. 2024-11-23T15:25:39,093 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] handler.RSProcedureHandler(58): pid=88 java.io.IOException: Unable to complete flush {ENCODED => 9d175c2ab5829c897c27b4bef55dd393, NAME => 'TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:25:39,093 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=88 java.io.IOException: Unable to complete flush {ENCODED => 9d175c2ab5829c897c27b4bef55dd393, NAME => 'TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:25:39,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=88 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9d175c2ab5829c897c27b4bef55dd393, NAME => 'TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9d175c2ab5829c897c27b4bef55dd393, NAME => 'TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:25:39,160 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:39,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43626 deadline: 1732375599159, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:39,160 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:39,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43596 deadline: 1732375599160, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:39,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-11-23T15:25:39,245 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:25:39,245 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=88 2024-11-23T15:25:39,245 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393. 2024-11-23T15:25:39,246 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393. as already flushing 2024-11-23T15:25:39,246 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393. 2024-11-23T15:25:39,246 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] handler.RSProcedureHandler(58): pid=88 java.io.IOException: Unable to complete flush {ENCODED => 9d175c2ab5829c897c27b4bef55dd393, NAME => 'TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T15:25:39,246 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=88 java.io.IOException: Unable to complete flush {ENCODED => 9d175c2ab5829c897c27b4bef55dd393, NAME => 'TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:25:39,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=88 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9d175c2ab5829c897c27b4bef55dd393, NAME => 'TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9d175c2ab5829c897c27b4bef55dd393, NAME => 'TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:25:39,398 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:25:39,398 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=88 2024-11-23T15:25:39,398 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393. 2024-11-23T15:25:39,399 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393. as already flushing 2024-11-23T15:25:39,399 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393. 2024-11-23T15:25:39,399 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] handler.RSProcedureHandler(58): pid=88 java.io.IOException: Unable to complete flush {ENCODED => 9d175c2ab5829c897c27b4bef55dd393, NAME => 'TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:25:39,399 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=88 java.io.IOException: Unable to complete flush {ENCODED => 9d175c2ab5829c897c27b4bef55dd393, NAME => 'TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:25:39,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=88 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9d175c2ab5829c897c27b4bef55dd393, NAME => 'TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9d175c2ab5829c897c27b4bef55dd393, NAME => 'TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T15:25:39,407 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=325 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/C/dc38221e8be441bc9a1ec03e2a78405a 2024-11-23T15:25:39,413 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/A/03abbc3e731141749396df296f428f07 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/A/03abbc3e731141749396df296f428f07 2024-11-23T15:25:39,417 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/A/03abbc3e731141749396df296f428f07, entries=150, sequenceid=325, filesize=12.0 K 2024-11-23T15:25:39,418 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/B/ec971a2d645542ef8ecb35dbe969ee71 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/B/ec971a2d645542ef8ecb35dbe969ee71 2024-11-23T15:25:39,423 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/B/ec971a2d645542ef8ecb35dbe969ee71, entries=150, sequenceid=325, filesize=12.0 K 2024-11-23T15:25:39,423 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/C/dc38221e8be441bc9a1ec03e2a78405a as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/C/dc38221e8be441bc9a1ec03e2a78405a 2024-11-23T15:25:39,428 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/C/dc38221e8be441bc9a1ec03e2a78405a, entries=150, sequenceid=325, filesize=12.0 K 2024-11-23T15:25:39,428 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=53.67 KB/54960 for 9d175c2ab5829c897c27b4bef55dd393 in 894ms, sequenceid=325, compaction requested=false 2024-11-23T15:25:39,428 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 9d175c2ab5829c897c27b4bef55dd393: 2024-11-23T15:25:39,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-11-23T15:25:39,557 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:25:39,557 DEBUG 
[RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=88 2024-11-23T15:25:39,557 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393. 2024-11-23T15:25:39,557 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(2837): Flushing 9d175c2ab5829c897c27b4bef55dd393 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-23T15:25:39,558 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9d175c2ab5829c897c27b4bef55dd393, store=A 2024-11-23T15:25:39,558 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:25:39,558 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9d175c2ab5829c897c27b4bef55dd393, store=B 2024-11-23T15:25:39,558 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:25:39,558 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9d175c2ab5829c897c27b4bef55dd393, store=C 2024-11-23T15:25:39,558 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:25:39,563 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/A/e4c2757c816c423988403a7d6a897c2f is 50, key is test_row_0/A:col10/1732375538543/Put/seqid=0 2024-11-23T15:25:39,584 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742105_1281 (size=12301) 2024-11-23T15:25:39,585 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=337 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/A/e4c2757c816c423988403a7d6a897c2f 2024-11-23T15:25:39,587 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393. 
as already flushing 2024-11-23T15:25:39,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(8581): Flush requested on 9d175c2ab5829c897c27b4bef55dd393 2024-11-23T15:25:39,594 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/B/52e42c5ee1f546f4bb4e2a177a486961 is 50, key is test_row_0/B:col10/1732375538543/Put/seqid=0 2024-11-23T15:25:39,621 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742106_1282 (size=12301) 2024-11-23T15:25:39,642 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:39,643 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:39,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43642 deadline: 1732375599640, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:39,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43624 deadline: 1732375599640, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:39,643 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:39,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43598 deadline: 1732375599641, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:39,661 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:39,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43626 deadline: 1732375599661, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:39,664 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:39,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43596 deadline: 1732375599663, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:39,745 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:39,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43624 deadline: 1732375599744, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:39,745 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:39,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43642 deadline: 1732375599744, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:39,746 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:39,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43598 deadline: 1732375599744, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:39,948 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:39,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43598 deadline: 1732375599947, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:39,948 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:39,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43624 deadline: 1732375599947, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:39,948 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:39,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43642 deadline: 1732375599947, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:40,023 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=337 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/B/52e42c5ee1f546f4bb4e2a177a486961 2024-11-23T15:25:40,039 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/C/eb5699146a4f46cfa82b86aa1b534c02 is 50, key is test_row_0/C:col10/1732375538543/Put/seqid=0 2024-11-23T15:25:40,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-11-23T15:25:40,050 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742107_1283 (size=12301) 2024-11-23T15:25:40,051 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=337 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/C/eb5699146a4f46cfa82b86aa1b534c02 2024-11-23T15:25:40,057 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/A/e4c2757c816c423988403a7d6a897c2f as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/A/e4c2757c816c423988403a7d6a897c2f 2024-11-23T15:25:40,062 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/A/e4c2757c816c423988403a7d6a897c2f, entries=150, sequenceid=337, filesize=12.0 K 2024-11-23T15:25:40,063 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/B/52e42c5ee1f546f4bb4e2a177a486961 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/B/52e42c5ee1f546f4bb4e2a177a486961 2024-11-23T15:25:40,069 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/B/52e42c5ee1f546f4bb4e2a177a486961, entries=150, sequenceid=337, filesize=12.0 K 2024-11-23T15:25:40,070 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/C/eb5699146a4f46cfa82b86aa1b534c02 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/C/eb5699146a4f46cfa82b86aa1b534c02 2024-11-23T15:25:40,074 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/C/eb5699146a4f46cfa82b86aa1b534c02, entries=150, sequenceid=337, filesize=12.0 K 2024-11-23T15:25:40,074 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 9d175c2ab5829c897c27b4bef55dd393 in 517ms, sequenceid=337, compaction requested=true 2024-11-23T15:25:40,075 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(2538): Flush status journal for 9d175c2ab5829c897c27b4bef55dd393: 2024-11-23T15:25:40,075 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393. 
2024-11-23T15:25:40,075 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=88 2024-11-23T15:25:40,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4106): Remote procedure done, pid=88 2024-11-23T15:25:40,077 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=88, resume processing ppid=87 2024-11-23T15:25:40,077 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=88, ppid=87, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.1360 sec 2024-11-23T15:25:40,079 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=87, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=87, table=TestAcidGuarantees in 1.1400 sec 2024-11-23T15:25:40,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(8581): Flush requested on 9d175c2ab5829c897c27b4bef55dd393 2024-11-23T15:25:40,253 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 9d175c2ab5829c897c27b4bef55dd393 3/3 column families, dataSize=161.02 KB heapSize=422.63 KB 2024-11-23T15:25:40,254 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9d175c2ab5829c897c27b4bef55dd393, store=A 2024-11-23T15:25:40,254 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:25:40,254 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9d175c2ab5829c897c27b4bef55dd393, store=B 2024-11-23T15:25:40,255 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:25:40,255 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9d175c2ab5829c897c27b4bef55dd393, store=C 2024-11-23T15:25:40,255 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:25:40,260 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/A/276e28eb455e425d9752dd28c05475eb is 50, key is test_row_0/A:col10/1732375540253/Put/seqid=0 2024-11-23T15:25:40,261 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:40,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43598 deadline: 1732375600259, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:40,263 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:40,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43642 deadline: 1732375600261, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:40,264 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:40,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43624 deadline: 1732375600262, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:40,269 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742108_1284 (size=14741) 2024-11-23T15:25:40,271 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=365 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/A/276e28eb455e425d9752dd28c05475eb 2024-11-23T15:25:40,281 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/B/b640ad3b3c174be6a4eb79acfab6043d is 50, key is test_row_0/B:col10/1732375540253/Put/seqid=0 2024-11-23T15:25:40,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742109_1285 (size=12301) 2024-11-23T15:25:40,363 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:40,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43598 deadline: 1732375600362, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:40,366 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:40,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43624 deadline: 1732375600365, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:40,366 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:40,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43642 deadline: 1732375600365, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:40,566 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:40,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43598 deadline: 1732375600564, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:40,569 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:40,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43624 deadline: 1732375600567, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:40,571 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:40,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43642 deadline: 1732375600569, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:40,668 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:40,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43626 deadline: 1732375600667, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:40,671 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:40,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43596 deadline: 1732375600669, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:40,687 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=365 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/B/b640ad3b3c174be6a4eb79acfab6043d 2024-11-23T15:25:40,695 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/C/bf2fc92605a7444fa04b7c6e191f1a9e is 50, key is test_row_0/C:col10/1732375540253/Put/seqid=0 2024-11-23T15:25:40,705 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742110_1286 (size=12301) 2024-11-23T15:25:40,868 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:40,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43598 deadline: 1732375600867, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:40,870 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:40,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43624 deadline: 1732375600870, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:40,875 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:40,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43642 deadline: 1732375600874, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:41,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-11-23T15:25:41,043 INFO [Thread-1019 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 87 completed 2024-11-23T15:25:41,044 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-23T15:25:41,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] procedure2.ProcedureExecutor(1098): Stored pid=89, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=89, table=TestAcidGuarantees 2024-11-23T15:25:41,046 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=89, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=89, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-23T15:25:41,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-11-23T15:25:41,046 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=89, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=89, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-23T15:25:41,046 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=90, ppid=89, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-23T15:25:41,106 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=365 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/C/bf2fc92605a7444fa04b7c6e191f1a9e 2024-11-23T15:25:41,111 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/A/276e28eb455e425d9752dd28c05475eb as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/A/276e28eb455e425d9752dd28c05475eb 2024-11-23T15:25:41,115 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/A/276e28eb455e425d9752dd28c05475eb, entries=200, sequenceid=365, filesize=14.4 K 2024-11-23T15:25:41,116 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/B/b640ad3b3c174be6a4eb79acfab6043d as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/B/b640ad3b3c174be6a4eb79acfab6043d 2024-11-23T15:25:41,120 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/B/b640ad3b3c174be6a4eb79acfab6043d, entries=150, sequenceid=365, filesize=12.0 K 2024-11-23T15:25:41,121 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/C/bf2fc92605a7444fa04b7c6e191f1a9e as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/C/bf2fc92605a7444fa04b7c6e191f1a9e 2024-11-23T15:25:41,125 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/C/bf2fc92605a7444fa04b7c6e191f1a9e, entries=150, sequenceid=365, filesize=12.0 K 2024-11-23T15:25:41,125 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~167.72 KB/171750, heapSize ~440.16 KB/450720, currentSize=40.25 KB/41220 for 9d175c2ab5829c897c27b4bef55dd393 in 872ms, sequenceid=365, compaction requested=true 2024-11-23T15:25:41,126 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 9d175c2ab5829c897c27b4bef55dd393: 2024-11-23T15:25:41,126 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9d175c2ab5829c897c27b4bef55dd393:A, priority=-2147483648, current under compaction store size is 1 2024-11-23T15:25:41,126 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T15:25:41,126 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9d175c2ab5829c897c27b4bef55dd393:B, priority=-2147483648, current under compaction store size is 2 2024-11-23T15:25:41,126 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T15:25:41,126 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9d175c2ab5829c897c27b4bef55dd393:C, priority=-2147483648, current under compaction store size is 3 2024-11-23T15:25:41,126 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T15:25:41,126 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-23T15:25:41,126 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-23T15:25:41,127 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 52326 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-23T15:25:41,127 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49886 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-23T15:25:41,127 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HStore(1540): 9d175c2ab5829c897c27b4bef55dd393/A is initiating minor compaction (all files) 2024-11-23T15:25:41,127 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1540): 9d175c2ab5829c897c27b4bef55dd393/B is initiating minor compaction (all files) 2024-11-23T15:25:41,127 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9d175c2ab5829c897c27b4bef55dd393/B in TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393. 2024-11-23T15:25:41,127 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9d175c2ab5829c897c27b4bef55dd393/A in TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393. 
2024-11-23T15:25:41,127 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/B/b92bea8c4d654978881121c9b36a9063, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/B/ec971a2d645542ef8ecb35dbe969ee71, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/B/52e42c5ee1f546f4bb4e2a177a486961, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/B/b640ad3b3c174be6a4eb79acfab6043d] into tmpdir=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp, totalSize=48.7 K 2024-11-23T15:25:41,127 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/A/9a5b12809d764449bf2c06c2c1fc4b7b, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/A/03abbc3e731141749396df296f428f07, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/A/e4c2757c816c423988403a7d6a897c2f, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/A/276e28eb455e425d9752dd28c05475eb] into tmpdir=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp, totalSize=51.1 K 2024-11-23T15:25:41,128 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting b92bea8c4d654978881121c9b36a9063, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=297, earliestPutTs=1732375537468 2024-11-23T15:25:41,128 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9a5b12809d764449bf2c06c2c1fc4b7b, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=297, earliestPutTs=1732375537468 2024-11-23T15:25:41,128 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 03abbc3e731141749396df296f428f07, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=325, earliestPutTs=1732375538428 2024-11-23T15:25:41,128 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting ec971a2d645542ef8ecb35dbe969ee71, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=325, earliestPutTs=1732375538428 2024-11-23T15:25:41,129 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.Compactor(224): Compacting e4c2757c816c423988403a7d6a897c2f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=337, earliestPutTs=1732375538541 2024-11-23T15:25:41,129 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting 
52e42c5ee1f546f4bb4e2a177a486961, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=337, earliestPutTs=1732375538541 2024-11-23T15:25:41,129 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting b640ad3b3c174be6a4eb79acfab6043d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=365, earliestPutTs=1732375539640 2024-11-23T15:25:41,129 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 276e28eb455e425d9752dd28c05475eb, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=365, earliestPutTs=1732375539640 2024-11-23T15:25:41,137 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9d175c2ab5829c897c27b4bef55dd393#B#compaction#245 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T15:25:41,137 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9d175c2ab5829c897c27b4bef55dd393#A#compaction#246 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T15:25:41,138 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/B/fadf44dd7f654ea2b88589a03c7c6226 is 50, key is test_row_0/B:col10/1732375540253/Put/seqid=0 2024-11-23T15:25:41,138 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/A/b7f05a9f4aad46bebadd8049a1c33bd2 is 50, key is test_row_0/A:col10/1732375540253/Put/seqid=0 2024-11-23T15:25:41,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-11-23T15:25:41,149 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742111_1287 (size=13119) 2024-11-23T15:25:41,151 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742112_1288 (size=13119) 2024-11-23T15:25:41,197 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:25:41,197 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=90 2024-11-23T15:25:41,198 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393. 
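
The two selections above show ExploringCompactionPolicy picking all four eligible HFiles per store (48.7 K for B, 51.1 K for A) with "16 blocking" as the store-file limit. Which files are eligible is controlled by the standard store-compaction settings; the snippet below is only an illustrative sketch of setting them on a client/server Configuration, with the values shown being the usual defaults rather than anything read from this test's configuration:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionTuningExample {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Minimum and maximum number of store files considered for one minor compaction.
        conf.setInt("hbase.hstore.compaction.min", 3);
        conf.setInt("hbase.hstore.compaction.max", 10);
        // A file is eligible if it is no larger than ratio * (sum of the smaller files).
        conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);
        // Store-file count at which new flushes start being blocked ("16 blocking" above).
        conf.setInt("hbase.hstore.blockingStoreFiles", 16);
        System.out.println("compaction.min = " + conf.getInt("hbase.hstore.compaction.min", -1));
    }
}
```
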
2024-11-23T15:25:41,198 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegion(2837): Flushing 9d175c2ab5829c897c27b4bef55dd393 3/3 column families, dataSize=40.25 KB heapSize=106.22 KB 2024-11-23T15:25:41,198 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9d175c2ab5829c897c27b4bef55dd393, store=A 2024-11-23T15:25:41,198 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:25:41,198 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9d175c2ab5829c897c27b4bef55dd393, store=B 2024-11-23T15:25:41,198 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:25:41,198 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9d175c2ab5829c897c27b4bef55dd393, store=C 2024-11-23T15:25:41,198 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:25:41,202 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/A/990b316cf8ac4e1fb6743fc21c5de879 is 50, key is test_row_0/A:col10/1732375540260/Put/seqid=0 2024-11-23T15:25:41,206 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742113_1289 (size=12301) 2024-11-23T15:25:41,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-11-23T15:25:41,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(8581): Flush requested on 9d175c2ab5829c897c27b4bef55dd393 2024-11-23T15:25:41,377 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393. as already flushing 2024-11-23T15:25:41,401 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:41,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43598 deadline: 1732375601399, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:41,403 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:41,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43624 deadline: 1732375601400, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:41,403 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:41,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43642 deadline: 1732375601401, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:41,504 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:41,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 191 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43598 deadline: 1732375601502, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:41,506 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:41,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43624 deadline: 1732375601504, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:41,506 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:41,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43642 deadline: 1732375601504, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:41,555 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/B/fadf44dd7f654ea2b88589a03c7c6226 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/B/fadf44dd7f654ea2b88589a03c7c6226 2024-11-23T15:25:41,556 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/A/b7f05a9f4aad46bebadd8049a1c33bd2 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/A/b7f05a9f4aad46bebadd8049a1c33bd2 2024-11-23T15:25:41,562 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 9d175c2ab5829c897c27b4bef55dd393/B of 9d175c2ab5829c897c27b4bef55dd393 into fadf44dd7f654ea2b88589a03c7c6226(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
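
The repeated RegionTooBusyException entries above mean writes were rejected while the region's memstore was over its blocking size (512.0 K here, which in HRegion is derived from the flush size times hbase.hregion.memstore.block.multiplier) until the in-flight flush freed space. The stock HBase client normally absorbs these with its own retry logic, so the loop below is purely an illustrative sketch of the equivalent explicit handling of such rejections, reusing the table, row, and column names that appear in this log:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionRetryExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Table table = connection.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
            int attempts = 0;
            while (true) {
                try {
                    table.put(put);
                    break; // write accepted
                } catch (RegionTooBusyException e) {
                    // Memstore above its blocking limit; back off and let the flush catch up.
                    if (++attempts >= 10) {
                        throw e;
                    }
                    Thread.sleep(100L * attempts);
                }
            }
        }
    }
}
```
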
2024-11-23T15:25:41,562 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9d175c2ab5829c897c27b4bef55dd393: 2024-11-23T15:25:41,563 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393., storeName=9d175c2ab5829c897c27b4bef55dd393/B, priority=12, startTime=1732375541126; duration=0sec 2024-11-23T15:25:41,563 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T15:25:41,563 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9d175c2ab5829c897c27b4bef55dd393:B 2024-11-23T15:25:41,563 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-23T15:25:41,563 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 9d175c2ab5829c897c27b4bef55dd393/A of 9d175c2ab5829c897c27b4bef55dd393 into b7f05a9f4aad46bebadd8049a1c33bd2(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T15:25:41,564 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9d175c2ab5829c897c27b4bef55dd393: 2024-11-23T15:25:41,564 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393., storeName=9d175c2ab5829c897c27b4bef55dd393/A, priority=12, startTime=1732375541126; duration=0sec 2024-11-23T15:25:41,564 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T15:25:41,564 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9d175c2ab5829c897c27b4bef55dd393:A 2024-11-23T15:25:41,564 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49852 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-23T15:25:41,564 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1540): 9d175c2ab5829c897c27b4bef55dd393/C is initiating minor compaction (all files) 2024-11-23T15:25:41,564 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9d175c2ab5829c897c27b4bef55dd393/C in TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393. 
2024-11-23T15:25:41,565 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/C/2e55a9cd1bdf4280b4075e2519b73f4c, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/C/dc38221e8be441bc9a1ec03e2a78405a, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/C/eb5699146a4f46cfa82b86aa1b534c02, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/C/bf2fc92605a7444fa04b7c6e191f1a9e] into tmpdir=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp, totalSize=48.7 K 2024-11-23T15:25:41,566 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting 2e55a9cd1bdf4280b4075e2519b73f4c, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=297, earliestPutTs=1732375537468 2024-11-23T15:25:41,566 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting dc38221e8be441bc9a1ec03e2a78405a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=325, earliestPutTs=1732375538428 2024-11-23T15:25:41,566 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting eb5699146a4f46cfa82b86aa1b534c02, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=337, earliestPutTs=1732375538541 2024-11-23T15:25:41,566 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting bf2fc92605a7444fa04b7c6e191f1a9e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=365, earliestPutTs=1732375539640 2024-11-23T15:25:41,575 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9d175c2ab5829c897c27b4bef55dd393#C#compaction#248 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T15:25:41,576 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/C/fea058fd0b3048b7b14dbc53a6068fd7 is 50, key is test_row_0/C:col10/1732375540253/Put/seqid=0 2024-11-23T15:25:41,584 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742114_1290 (size=13085) 2024-11-23T15:25:41,591 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/C/fea058fd0b3048b7b14dbc53a6068fd7 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/C/fea058fd0b3048b7b14dbc53a6068fd7 2024-11-23T15:25:41,597 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 9d175c2ab5829c897c27b4bef55dd393/C of 9d175c2ab5829c897c27b4bef55dd393 into fea058fd0b3048b7b14dbc53a6068fd7(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T15:25:41,597 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9d175c2ab5829c897c27b4bef55dd393: 2024-11-23T15:25:41,597 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393., storeName=9d175c2ab5829c897c27b4bef55dd393/C, priority=12, startTime=1732375541126; duration=0sec 2024-11-23T15:25:41,597 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T15:25:41,597 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9d175c2ab5829c897c27b4bef55dd393:C 2024-11-23T15:25:41,606 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=374 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/A/990b316cf8ac4e1fb6743fc21c5de879 2024-11-23T15:25:41,613 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/B/576ea058ed5444e0a7f0bf23bcc1dc1e is 50, key is test_row_0/B:col10/1732375540260/Put/seqid=0 2024-11-23T15:25:41,618 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742115_1291 (size=12301) 2024-11-23T15:25:41,619 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.DefaultStoreFlusher(81): 
Flushed memstore data size=13.42 KB at sequenceid=374 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/B/576ea058ed5444e0a7f0bf23bcc1dc1e 2024-11-23T15:25:41,628 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/C/7243fcf69ebc49869b2d6172054115fa is 50, key is test_row_0/C:col10/1732375540260/Put/seqid=0 2024-11-23T15:25:41,633 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742116_1292 (size=12301) 2024-11-23T15:25:41,634 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=374 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/C/7243fcf69ebc49869b2d6172054115fa 2024-11-23T15:25:41,641 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/A/990b316cf8ac4e1fb6743fc21c5de879 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/A/990b316cf8ac4e1fb6743fc21c5de879 2024-11-23T15:25:41,645 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/A/990b316cf8ac4e1fb6743fc21c5de879, entries=150, sequenceid=374, filesize=12.0 K 2024-11-23T15:25:41,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-11-23T15:25:41,649 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/B/576ea058ed5444e0a7f0bf23bcc1dc1e as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/B/576ea058ed5444e0a7f0bf23bcc1dc1e 2024-11-23T15:25:41,653 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/B/576ea058ed5444e0a7f0bf23bcc1dc1e, entries=150, sequenceid=374, filesize=12.0 K 2024-11-23T15:25:41,654 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/C/7243fcf69ebc49869b2d6172054115fa as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/C/7243fcf69ebc49869b2d6172054115fa 2024-11-23T15:25:41,658 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/C/7243fcf69ebc49869b2d6172054115fa, entries=150, sequenceid=374, filesize=12.0 K 2024-11-23T15:25:41,658 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegion(3040): Finished flush of dataSize ~40.25 KB/41220, heapSize ~106.17 KB/108720, currentSize=161.02 KB/164880 for 9d175c2ab5829c897c27b4bef55dd393 in 460ms, sequenceid=374, compaction requested=false 2024-11-23T15:25:41,658 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegion(2538): Flush status journal for 9d175c2ab5829c897c27b4bef55dd393: 2024-11-23T15:25:41,658 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393. 2024-11-23T15:25:41,659 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=90 2024-11-23T15:25:41,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4106): Remote procedure done, pid=90 2024-11-23T15:25:41,661 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=90, resume processing ppid=89 2024-11-23T15:25:41,661 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=90, ppid=89, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 614 msec 2024-11-23T15:25:41,663 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=89, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=89, table=TestAcidGuarantees in 618 msec 2024-11-23T15:25:41,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(8581): Flush requested on 9d175c2ab5829c897c27b4bef55dd393 2024-11-23T15:25:41,707 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 9d175c2ab5829c897c27b4bef55dd393 3/3 column families, dataSize=167.72 KB heapSize=440.20 KB 2024-11-23T15:25:41,708 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9d175c2ab5829c897c27b4bef55dd393, store=A 2024-11-23T15:25:41,708 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:25:41,708 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9d175c2ab5829c897c27b4bef55dd393, store=B 2024-11-23T15:25:41,708 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:25:41,708 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
9d175c2ab5829c897c27b4bef55dd393, store=C 2024-11-23T15:25:41,708 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:25:41,712 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/A/626ca3607b48460a929e5e9c684047d1 is 50, key is test_row_0/A:col10/1732375541399/Put/seqid=0 2024-11-23T15:25:41,714 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:41,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 195 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43598 deadline: 1732375601711, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:41,714 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:41,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43624 deadline: 1732375601712, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:41,716 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:41,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43642 deadline: 1732375601714, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:41,719 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742117_1293 (size=12301) 2024-11-23T15:25:41,816 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:41,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 197 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43598 deadline: 1732375601815, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:41,817 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:41,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43624 deadline: 1732375601815, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:41,818 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:41,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43642 deadline: 1732375601817, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:42,021 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:42,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43624 deadline: 1732375602019, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:42,021 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:42,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 199 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43598 deadline: 1732375602019, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:42,022 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:42,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 190 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43642 deadline: 1732375602020, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:42,119 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=405 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/A/626ca3607b48460a929e5e9c684047d1 2024-11-23T15:25:42,128 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/B/c01e00ae99b64d58bbaf9b8c9330af91 is 50, key is test_row_0/B:col10/1732375541399/Put/seqid=0 2024-11-23T15:25:42,133 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742118_1294 (size=12301) 2024-11-23T15:25:42,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-11-23T15:25:42,149 INFO [Thread-1019 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 89 completed 2024-11-23T15:25:42,150 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-23T15:25:42,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] procedure2.ProcedureExecutor(1098): Stored pid=91, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=91, table=TestAcidGuarantees 2024-11-23T15:25:42,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=91 2024-11-23T15:25:42,152 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=91, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=91, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-23T15:25:42,153 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=91, 
state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=91, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-23T15:25:42,153 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=92, ppid=91, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-23T15:25:42,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=91 2024-11-23T15:25:42,304 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:25:42,305 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=92 2024-11-23T15:25:42,305 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393. 2024-11-23T15:25:42,305 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393. as already flushing 2024-11-23T15:25:42,305 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393. 2024-11-23T15:25:42,305 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] handler.RSProcedureHandler(58): pid=92 java.io.IOException: Unable to complete flush {ENCODED => 9d175c2ab5829c897c27b4bef55dd393, NAME => 'TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:25:42,306 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=92 java.io.IOException: Unable to complete flush {ENCODED => 9d175c2ab5829c897c27b4bef55dd393, NAME => 'TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:25:42,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=92 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9d175c2ab5829c897c27b4bef55dd393, NAME => 'TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9d175c2ab5829c897c27b4bef55dd393, NAME => 'TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:25:42,324 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:42,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 201 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43598 deadline: 1732375602323, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:42,325 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:42,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 191 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43624 deadline: 1732375602324, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:42,325 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:42,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 192 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43642 deadline: 1732375602324, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:42,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=91 2024-11-23T15:25:42,458 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:25:42,458 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=92 2024-11-23T15:25:42,458 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393. 2024-11-23T15:25:42,458 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393. as already flushing 2024-11-23T15:25:42,458 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393. 2024-11-23T15:25:42,458 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] handler.RSProcedureHandler(58): pid=92 java.io.IOException: Unable to complete flush {ENCODED => 9d175c2ab5829c897c27b4bef55dd393, NAME => 'TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T15:25:42,459 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=92 java.io.IOException: Unable to complete flush {ENCODED => 9d175c2ab5829c897c27b4bef55dd393, NAME => 'TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:25:42,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=92 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9d175c2ab5829c897c27b4bef55dd393, NAME => 'TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9d175c2ab5829c897c27b4bef55dd393, NAME => 'TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:25:42,534 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=405 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/B/c01e00ae99b64d58bbaf9b8c9330af91 2024-11-23T15:25:42,542 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/C/310eb3cc70184e11af04507d1e017662 is 50, key is test_row_0/C:col10/1732375541399/Put/seqid=0 2024-11-23T15:25:42,545 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742119_1295 (size=12301) 2024-11-23T15:25:42,610 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:25:42,611 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=92 2024-11-23T15:25:42,611 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393. 2024-11-23T15:25:42,611 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393. as already flushing 2024-11-23T15:25:42,611 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393. 2024-11-23T15:25:42,611 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] handler.RSProcedureHandler(58): pid=92 java.io.IOException: Unable to complete flush {ENCODED => 9d175c2ab5829c897c27b4bef55dd393, NAME => 'TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T15:25:42,611 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=92 java.io.IOException: Unable to complete flush {ENCODED => 9d175c2ab5829c897c27b4bef55dd393, NAME => 'TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:25:42,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=92 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9d175c2ab5829c897c27b4bef55dd393, NAME => 'TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9d175c2ab5829c897c27b4bef55dd393, NAME => 'TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:25:42,676 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:42,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43626 deadline: 1732375602675, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:42,677 DEBUG [Thread-1009 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4128 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393., hostname=6a36843bf905,33811,1732375456985, seqNum=2, see https://s.apache.org/timeout, 
exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at 
org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at 
org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-23T15:25:42,689 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:42,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43596 deadline: 1732375602689, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:42,690 DEBUG [Thread-1011 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4140 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at 
org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393., hostname=6a36843bf905,33811,1732375456985, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at 
org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at 
org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-23T15:25:42,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=91 2024-11-23T15:25:42,763 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:25:42,763 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=92 2024-11-23T15:25:42,764 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393. 2024-11-23T15:25:42,764 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393. as already flushing 2024-11-23T15:25:42,764 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393. 2024-11-23T15:25:42,764 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] handler.RSProcedureHandler(58): pid=92 java.io.IOException: Unable to complete flush {ENCODED => 9d175c2ab5829c897c27b4bef55dd393, NAME => 'TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T15:25:42,764 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=92 java.io.IOException: Unable to complete flush {ENCODED => 9d175c2ab5829c897c27b4bef55dd393, NAME => 'TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:25:42,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=92 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9d175c2ab5829c897c27b4bef55dd393, NAME => 'TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9d175c2ab5829c897c27b4bef55dd393, NAME => 'TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:25:42,830 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:42,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 194 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43642 deadline: 1732375602829, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:42,830 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:42,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 203 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43598 deadline: 1732375602829, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:42,831 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:42,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 193 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43624 deadline: 1732375602830, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:42,916 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:25:42,916 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=92 2024-11-23T15:25:42,916 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393. 
2024-11-23T15:25:42,916 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393. as already flushing 2024-11-23T15:25:42,916 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393. 2024-11-23T15:25:42,917 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] handler.RSProcedureHandler(58): pid=92 java.io.IOException: Unable to complete flush {ENCODED => 9d175c2ab5829c897c27b4bef55dd393, NAME => 'TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:25:42,917 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=92 java.io.IOException: Unable to complete flush {ENCODED => 9d175c2ab5829c897c27b4bef55dd393, NAME => 'TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T15:25:42,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=92 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9d175c2ab5829c897c27b4bef55dd393, NAME => 'TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9d175c2ab5829c897c27b4bef55dd393, NAME => 'TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T15:25:42,946 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=405 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/C/310eb3cc70184e11af04507d1e017662 2024-11-23T15:25:42,951 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/A/626ca3607b48460a929e5e9c684047d1 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/A/626ca3607b48460a929e5e9c684047d1 2024-11-23T15:25:42,955 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/A/626ca3607b48460a929e5e9c684047d1, entries=150, sequenceid=405, filesize=12.0 K 2024-11-23T15:25:42,955 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/B/c01e00ae99b64d58bbaf9b8c9330af91 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/B/c01e00ae99b64d58bbaf9b8c9330af91 2024-11-23T15:25:42,959 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/B/c01e00ae99b64d58bbaf9b8c9330af91, entries=150, sequenceid=405, filesize=12.0 K 2024-11-23T15:25:42,960 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/C/310eb3cc70184e11af04507d1e017662 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/C/310eb3cc70184e11af04507d1e017662 2024-11-23T15:25:42,964 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/C/310eb3cc70184e11af04507d1e017662, entries=150, sequenceid=405, filesize=12.0 K 2024-11-23T15:25:42,965 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~167.72 KB/171750, heapSize ~440.16 KB/450720, currentSize=33.54 KB/34350 for 9d175c2ab5829c897c27b4bef55dd393 in 1257ms, sequenceid=405, compaction requested=true 2024-11-23T15:25:42,965 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 9d175c2ab5829c897c27b4bef55dd393: 2024-11-23T15:25:42,965 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9d175c2ab5829c897c27b4bef55dd393:A, priority=-2147483648, current under compaction store size is 1 2024-11-23T15:25:42,965 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T15:25:42,965 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9d175c2ab5829c897c27b4bef55dd393:B, priority=-2147483648, current under compaction store size is 2 2024-11-23T15:25:42,965 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T15:25:42,965 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T15:25:42,965 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9d175c2ab5829c897c27b4bef55dd393:C, priority=-2147483648, current under compaction store size is 3 2024-11-23T15:25:42,965 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T15:25:42,965 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T15:25:42,966 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37721 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T15:25:42,966 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37721 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T15:25:42,966 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HStore(1540): 9d175c2ab5829c897c27b4bef55dd393/A is initiating minor compaction (all files) 2024-11-23T15:25:42,966 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1540): 9d175c2ab5829c897c27b4bef55dd393/B is initiating minor compaction (all files) 2024-11-23T15:25:42,966 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9d175c2ab5829c897c27b4bef55dd393/B in TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393. 2024-11-23T15:25:42,966 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9d175c2ab5829c897c27b4bef55dd393/A in TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393. 
2024-11-23T15:25:42,967 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/B/fadf44dd7f654ea2b88589a03c7c6226, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/B/576ea058ed5444e0a7f0bf23bcc1dc1e, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/B/c01e00ae99b64d58bbaf9b8c9330af91] into tmpdir=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp, totalSize=36.8 K 2024-11-23T15:25:42,967 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/A/b7f05a9f4aad46bebadd8049a1c33bd2, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/A/990b316cf8ac4e1fb6743fc21c5de879, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/A/626ca3607b48460a929e5e9c684047d1] into tmpdir=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp, totalSize=36.8 K 2024-11-23T15:25:42,967 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting fadf44dd7f654ea2b88589a03c7c6226, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=365, earliestPutTs=1732375539640 2024-11-23T15:25:42,967 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.Compactor(224): Compacting b7f05a9f4aad46bebadd8049a1c33bd2, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=365, earliestPutTs=1732375539640 2024-11-23T15:25:42,968 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting 576ea058ed5444e0a7f0bf23bcc1dc1e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=374, earliestPutTs=1732375540255 2024-11-23T15:25:42,968 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 990b316cf8ac4e1fb6743fc21c5de879, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=374, earliestPutTs=1732375540255 2024-11-23T15:25:42,968 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting c01e00ae99b64d58bbaf9b8c9330af91, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=405, earliestPutTs=1732375541399 2024-11-23T15:25:42,968 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 626ca3607b48460a929e5e9c684047d1, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=405, earliestPutTs=1732375541399 2024-11-23T15:25:42,977 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9d175c2ab5829c897c27b4bef55dd393#B#compaction#254 average throughput is 6.55 MB/second, slept 0 time(s) and 
total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T15:25:42,978 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/B/65ba9456359d45eca7656c30c12eace2 is 50, key is test_row_0/B:col10/1732375541399/Put/seqid=0 2024-11-23T15:25:42,980 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9d175c2ab5829c897c27b4bef55dd393#A#compaction#255 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T15:25:42,981 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/A/f569db4a67be4a4baa81d258cc88745e is 50, key is test_row_0/A:col10/1732375541399/Put/seqid=0 2024-11-23T15:25:42,999 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742121_1297 (size=13221) 2024-11-23T15:25:43,000 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742120_1296 (size=13221) 2024-11-23T15:25:43,068 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:25:43,069 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=92 2024-11-23T15:25:43,069 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393. 
2024-11-23T15:25:43,069 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegion(2837): Flushing 9d175c2ab5829c897c27b4bef55dd393 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-11-23T15:25:43,070 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9d175c2ab5829c897c27b4bef55dd393, store=A 2024-11-23T15:25:43,070 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:25:43,070 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9d175c2ab5829c897c27b4bef55dd393, store=B 2024-11-23T15:25:43,070 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:25:43,070 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9d175c2ab5829c897c27b4bef55dd393, store=C 2024-11-23T15:25:43,070 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:25:43,078 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/A/a79990431e2a4947ab7d26c262d057f6 is 50, key is test_row_1/A:col10/1732375541713/Put/seqid=0 2024-11-23T15:25:43,085 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742122_1298 (size=9857) 2024-11-23T15:25:43,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=91 2024-11-23T15:25:43,405 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/B/65ba9456359d45eca7656c30c12eace2 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/B/65ba9456359d45eca7656c30c12eace2 2024-11-23T15:25:43,408 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/A/f569db4a67be4a4baa81d258cc88745e as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/A/f569db4a67be4a4baa81d258cc88745e 2024-11-23T15:25:43,413 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 9d175c2ab5829c897c27b4bef55dd393/B of 9d175c2ab5829c897c27b4bef55dd393 into 
65ba9456359d45eca7656c30c12eace2(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T15:25:43,413 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9d175c2ab5829c897c27b4bef55dd393: 2024-11-23T15:25:43,413 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393., storeName=9d175c2ab5829c897c27b4bef55dd393/B, priority=13, startTime=1732375542965; duration=0sec 2024-11-23T15:25:43,413 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T15:25:43,413 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9d175c2ab5829c897c27b4bef55dd393:B 2024-11-23T15:25:43,413 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T15:25:43,414 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 9d175c2ab5829c897c27b4bef55dd393/A of 9d175c2ab5829c897c27b4bef55dd393 into f569db4a67be4a4baa81d258cc88745e(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T15:25:43,414 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9d175c2ab5829c897c27b4bef55dd393: 2024-11-23T15:25:43,414 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393., storeName=9d175c2ab5829c897c27b4bef55dd393/A, priority=13, startTime=1732375542965; duration=0sec 2024-11-23T15:25:43,414 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T15:25:43,414 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9d175c2ab5829c897c27b4bef55dd393:A 2024-11-23T15:25:43,415 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37687 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T15:25:43,415 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1540): 9d175c2ab5829c897c27b4bef55dd393/C is initiating minor compaction (all files) 2024-11-23T15:25:43,415 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9d175c2ab5829c897c27b4bef55dd393/C in TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393. 
2024-11-23T15:25:43,415 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/C/fea058fd0b3048b7b14dbc53a6068fd7, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/C/7243fcf69ebc49869b2d6172054115fa, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/C/310eb3cc70184e11af04507d1e017662] into tmpdir=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp, totalSize=36.8 K 2024-11-23T15:25:43,416 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting fea058fd0b3048b7b14dbc53a6068fd7, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=365, earliestPutTs=1732375539640 2024-11-23T15:25:43,416 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting 7243fcf69ebc49869b2d6172054115fa, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=374, earliestPutTs=1732375540255 2024-11-23T15:25:43,416 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting 310eb3cc70184e11af04507d1e017662, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=405, earliestPutTs=1732375541399 2024-11-23T15:25:43,426 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9d175c2ab5829c897c27b4bef55dd393#C#compaction#257 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T15:25:43,427 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/C/e9be86cfcf9946d29cda4943916215f3 is 50, key is test_row_0/C:col10/1732375541399/Put/seqid=0 2024-11-23T15:25:43,430 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742123_1299 (size=13187) 2024-11-23T15:25:43,486 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=413 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/A/a79990431e2a4947ab7d26c262d057f6 2024-11-23T15:25:43,494 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/B/a73402bc85d544b2a76211d2b480dbf3 is 50, key is test_row_1/B:col10/1732375541713/Put/seqid=0 2024-11-23T15:25:43,498 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742124_1300 (size=9857) 2024-11-23T15:25:43,498 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=413 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/B/a73402bc85d544b2a76211d2b480dbf3 2024-11-23T15:25:43,505 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/C/3c47635a45434f29a72144849d4cae90 is 50, key is test_row_1/C:col10/1732375541713/Put/seqid=0 2024-11-23T15:25:43,510 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742125_1301 (size=9857) 2024-11-23T15:25:43,510 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=413 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/C/3c47635a45434f29a72144849d4cae90 2024-11-23T15:25:43,515 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/A/a79990431e2a4947ab7d26c262d057f6 as 
hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/A/a79990431e2a4947ab7d26c262d057f6 2024-11-23T15:25:43,519 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/A/a79990431e2a4947ab7d26c262d057f6, entries=100, sequenceid=413, filesize=9.6 K 2024-11-23T15:25:43,521 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/B/a73402bc85d544b2a76211d2b480dbf3 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/B/a73402bc85d544b2a76211d2b480dbf3 2024-11-23T15:25:43,524 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/B/a73402bc85d544b2a76211d2b480dbf3, entries=100, sequenceid=413, filesize=9.6 K 2024-11-23T15:25:43,525 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/C/3c47635a45434f29a72144849d4cae90 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/C/3c47635a45434f29a72144849d4cae90 2024-11-23T15:25:43,530 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/C/3c47635a45434f29a72144849d4cae90, entries=100, sequenceid=413, filesize=9.6 K 2024-11-23T15:25:43,531 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=0 B/0 for 9d175c2ab5829c897c27b4bef55dd393 in 462ms, sequenceid=413, compaction requested=false 2024-11-23T15:25:43,531 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegion(2538): Flush status journal for 9d175c2ab5829c897c27b4bef55dd393: 2024-11-23T15:25:43,531 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393. 
2024-11-23T15:25:43,531 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=92 2024-11-23T15:25:43,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4106): Remote procedure done, pid=92 2024-11-23T15:25:43,534 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=92, resume processing ppid=91 2024-11-23T15:25:43,534 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=92, ppid=91, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.3790 sec 2024-11-23T15:25:43,535 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=91, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=91, table=TestAcidGuarantees in 1.3830 sec 2024-11-23T15:25:43,670 DEBUG [Thread-1026 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3c299cfb to 127.0.0.1:62881 2024-11-23T15:25:43,670 DEBUG [Thread-1026 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T15:25:43,670 DEBUG [Thread-1028 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x605827c9 to 127.0.0.1:62881 2024-11-23T15:25:43,670 DEBUG [Thread-1020 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x14ed1e44 to 127.0.0.1:62881 2024-11-23T15:25:43,670 DEBUG [Thread-1020 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T15:25:43,670 DEBUG [Thread-1028 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T15:25:43,671 DEBUG [Thread-1024 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x036642cb to 127.0.0.1:62881 2024-11-23T15:25:43,671 DEBUG [Thread-1024 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T15:25:43,671 DEBUG [Thread-1022 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x72537a47 to 127.0.0.1:62881 2024-11-23T15:25:43,671 DEBUG [Thread-1022 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T15:25:43,833 DEBUG [Thread-1013 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7c480dfb to 127.0.0.1:62881 2024-11-23T15:25:43,833 DEBUG [Thread-1013 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T15:25:43,835 DEBUG [Thread-1017 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2e9ae050 to 127.0.0.1:62881 2024-11-23T15:25:43,835 DEBUG [Thread-1017 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T15:25:43,835 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/C/e9be86cfcf9946d29cda4943916215f3 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/C/e9be86cfcf9946d29cda4943916215f3 2024-11-23T15:25:43,838 DEBUG [Thread-1015 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x34cb3991 to 127.0.0.1:62881 2024-11-23T15:25:43,838 DEBUG [Thread-1015 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T15:25:43,839 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 9d175c2ab5829c897c27b4bef55dd393/C of 9d175c2ab5829c897c27b4bef55dd393 into e9be86cfcf9946d29cda4943916215f3(size=12.9 K), total size for store 
is 22.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T15:25:43,839 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9d175c2ab5829c897c27b4bef55dd393: 2024-11-23T15:25:43,839 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393., storeName=9d175c2ab5829c897c27b4bef55dd393/C, priority=13, startTime=1732375542965; duration=0sec 2024-11-23T15:25:43,840 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T15:25:43,840 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9d175c2ab5829c897c27b4bef55dd393:C 2024-11-23T15:25:44,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=91 2024-11-23T15:25:44,256 INFO [Thread-1019 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 91 completed 2024-11-23T15:25:45,402 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-23T15:25:46,697 DEBUG [Thread-1009 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3c1ac389 to 127.0.0.1:62881 2024-11-23T15:25:46,697 DEBUG [Thread-1009 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T15:25:46,710 DEBUG [Thread-1011 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x028e73c0 to 127.0.0.1:62881 2024-11-23T15:25:46,711 DEBUG [Thread-1011 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T15:25:46,711 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. 
Writers: 2024-11-23T15:25:46,711 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 59 2024-11-23T15:25:46,711 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 53 2024-11-23T15:25:46,711 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 80 2024-11-23T15:25:46,711 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 71 2024-11-23T15:25:46,711 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 70 2024-11-23T15:25:46,711 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-11-23T15:25:46,711 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 7433 2024-11-23T15:25:46,711 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 7240 2024-11-23T15:25:46,711 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 7110 2024-11-23T15:25:46,711 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 7454 2024-11-23T15:25:46,711 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 7264 2024-11-23T15:25:46,711 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-11-23T15:25:46,711 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-11-23T15:25:46,711 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x64dc42d9 to 127.0.0.1:62881 2024-11-23T15:25:46,711 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T15:25:46,712 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-11-23T15:25:46,712 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-11-23T15:25:46,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] procedure2.ProcedureExecutor(1098): Stored pid=93, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-11-23T15:25:46,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=93 2024-11-23T15:25:46,714 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732375546714"}]},"ts":"1732375546714"} 2024-11-23T15:25:46,715 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-11-23T15:25:46,717 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-11-23T15:25:46,718 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=94, ppid=93, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-23T15:25:46,719 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=95, ppid=94, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=9d175c2ab5829c897c27b4bef55dd393, UNASSIGN}] 2024-11-23T15:25:46,719 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=95, ppid=94, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=9d175c2ab5829c897c27b4bef55dd393, UNASSIGN 2024-11-23T15:25:46,719 INFO [PEWorker-3 {}] 
assignment.RegionStateStore(202): pid=95 updating hbase:meta row=9d175c2ab5829c897c27b4bef55dd393, regionState=CLOSING, regionLocation=6a36843bf905,33811,1732375456985 2024-11-23T15:25:46,720 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-23T15:25:46,720 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=96, ppid=95, state=RUNNABLE; CloseRegionProcedure 9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985}] 2024-11-23T15:25:46,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=93 2024-11-23T15:25:46,871 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:25:46,872 INFO [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] handler.UnassignRegionHandler(124): Close 9d175c2ab5829c897c27b4bef55dd393 2024-11-23T15:25:46,872 DEBUG [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-23T15:25:46,872 DEBUG [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegion(1681): Closing 9d175c2ab5829c897c27b4bef55dd393, disabling compactions & flushes 2024-11-23T15:25:46,872 INFO [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393. 2024-11-23T15:25:46,872 DEBUG [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393. 2024-11-23T15:25:46,872 DEBUG [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393. after waiting 0 ms 2024-11-23T15:25:46,872 DEBUG [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393. 
2024-11-23T15:25:46,872 INFO [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegion(2837): Flushing 9d175c2ab5829c897c27b4bef55dd393 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-11-23T15:25:46,872 DEBUG [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9d175c2ab5829c897c27b4bef55dd393, store=A 2024-11-23T15:25:46,872 DEBUG [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:25:46,872 DEBUG [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9d175c2ab5829c897c27b4bef55dd393, store=B 2024-11-23T15:25:46,872 DEBUG [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:25:46,872 DEBUG [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9d175c2ab5829c897c27b4bef55dd393, store=C 2024-11-23T15:25:46,872 DEBUG [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:25:46,876 DEBUG [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/A/27325121496e4b378294a3baa3f55ddd is 50, key is test_row_1/A:col10/1732375546696/Put/seqid=0 2024-11-23T15:25:46,879 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742126_1302 (size=9857) 2024-11-23T15:25:47,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=93 2024-11-23T15:25:47,280 INFO [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=424 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/A/27325121496e4b378294a3baa3f55ddd 2024-11-23T15:25:47,287 DEBUG [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/B/a03b245b798747f6aa50e8eaa503d74e is 50, key is test_row_1/B:col10/1732375546696/Put/seqid=0 2024-11-23T15:25:47,290 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742127_1303 (size=9857) 2024-11-23T15:25:47,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=93 2024-11-23T15:25:47,691 INFO [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 
{event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=424 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/B/a03b245b798747f6aa50e8eaa503d74e 2024-11-23T15:25:47,697 DEBUG [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/C/5324e910a8274e20a813ab253ccbfd98 is 50, key is test_row_1/C:col10/1732375546696/Put/seqid=0 2024-11-23T15:25:47,700 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742128_1304 (size=9857) 2024-11-23T15:25:47,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=93 2024-11-23T15:25:48,101 INFO [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=424 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/C/5324e910a8274e20a813ab253ccbfd98 2024-11-23T15:25:48,105 DEBUG [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/A/27325121496e4b378294a3baa3f55ddd as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/A/27325121496e4b378294a3baa3f55ddd 2024-11-23T15:25:48,108 INFO [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/A/27325121496e4b378294a3baa3f55ddd, entries=100, sequenceid=424, filesize=9.6 K 2024-11-23T15:25:48,108 DEBUG [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/B/a03b245b798747f6aa50e8eaa503d74e as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/B/a03b245b798747f6aa50e8eaa503d74e 2024-11-23T15:25:48,116 INFO [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/B/a03b245b798747f6aa50e8eaa503d74e, entries=100, sequenceid=424, filesize=9.6 K 2024-11-23T15:25:48,117 DEBUG [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegionFileSystem(442): 
Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/.tmp/C/5324e910a8274e20a813ab253ccbfd98 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/C/5324e910a8274e20a813ab253ccbfd98 2024-11-23T15:25:48,120 INFO [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/C/5324e910a8274e20a813ab253ccbfd98, entries=100, sequenceid=424, filesize=9.6 K 2024-11-23T15:25:48,121 INFO [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=0 B/0 for 9d175c2ab5829c897c27b4bef55dd393 in 1249ms, sequenceid=424, compaction requested=true 2024-11-23T15:25:48,121 DEBUG [StoreCloser-TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/A/a7b16659eeeb48699f98c718208e35ba, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/A/7af5775a22d641a49242e2358234504a, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/A/31efe6285ea74ecc9b9078f69e65c98b, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/A/f5cf535ee07643f193f22c3558762e99, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/A/817b32710d2f4fd1b3e150b2acbd385b, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/A/5afe36eacc24481ab7cc3180c5615858, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/A/f1ce2c8c04e24c65bbeeb736cafd8122, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/A/c65caff867e94621940be26ed817fcff, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/A/32bbf6f6eb3843188f49c3a7433f47a8, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/A/dff9c4325ede4685a5436aeac5acf34e, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/A/2fed2d6d0b2e449f8725510f6bb5ba57, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/A/bc19e12c8f644bcea03932e4830a60b9, 
hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/A/14acc8db6f934ef98fb7f2cfc7af96eb, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/A/635e4ceba9af4ec6b7975dc9f40ae841, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/A/d16e211eb5f34fb884225d5d17da3a6d, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/A/634818014909485f940c4e9f1040b646, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/A/fec9ca9a317a419bbb3e91e1dfd75cbc, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/A/c902ab83a35b445d93cf6dc5a2933a9e, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/A/2496cdcaeebe4585a426f7619992bd2b, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/A/9a5b12809d764449bf2c06c2c1fc4b7b, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/A/144a8899ba6e45b392d6e74e13cee065, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/A/03abbc3e731141749396df296f428f07, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/A/e4c2757c816c423988403a7d6a897c2f, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/A/276e28eb455e425d9752dd28c05475eb, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/A/b7f05a9f4aad46bebadd8049a1c33bd2, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/A/990b316cf8ac4e1fb6743fc21c5de879, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/A/626ca3607b48460a929e5e9c684047d1] to archive 2024-11-23T15:25:48,122 DEBUG [StoreCloser-TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-23T15:25:48,124 DEBUG [StoreCloser-TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/A/a7b16659eeeb48699f98c718208e35ba to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/A/a7b16659eeeb48699f98c718208e35ba 2024-11-23T15:25:48,125 DEBUG [StoreCloser-TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/A/7af5775a22d641a49242e2358234504a to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/A/7af5775a22d641a49242e2358234504a 2024-11-23T15:25:48,126 DEBUG [StoreCloser-TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/A/31efe6285ea74ecc9b9078f69e65c98b to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/A/31efe6285ea74ecc9b9078f69e65c98b 2024-11-23T15:25:48,127 DEBUG [StoreCloser-TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/A/f5cf535ee07643f193f22c3558762e99 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/A/f5cf535ee07643f193f22c3558762e99 2024-11-23T15:25:48,128 DEBUG [StoreCloser-TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/A/817b32710d2f4fd1b3e150b2acbd385b to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/A/817b32710d2f4fd1b3e150b2acbd385b 2024-11-23T15:25:48,129 DEBUG [StoreCloser-TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/A/5afe36eacc24481ab7cc3180c5615858 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/A/5afe36eacc24481ab7cc3180c5615858 2024-11-23T15:25:48,129 DEBUG [StoreCloser-TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/A/f1ce2c8c04e24c65bbeeb736cafd8122 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/A/f1ce2c8c04e24c65bbeeb736cafd8122 2024-11-23T15:25:48,130 DEBUG [StoreCloser-TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/A/c65caff867e94621940be26ed817fcff to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/A/c65caff867e94621940be26ed817fcff 2024-11-23T15:25:48,131 DEBUG [StoreCloser-TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/A/32bbf6f6eb3843188f49c3a7433f47a8 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/A/32bbf6f6eb3843188f49c3a7433f47a8 2024-11-23T15:25:48,132 DEBUG [StoreCloser-TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/A/dff9c4325ede4685a5436aeac5acf34e to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/A/dff9c4325ede4685a5436aeac5acf34e 2024-11-23T15:25:48,133 DEBUG [StoreCloser-TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/A/2fed2d6d0b2e449f8725510f6bb5ba57 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/A/2fed2d6d0b2e449f8725510f6bb5ba57 2024-11-23T15:25:48,134 DEBUG [StoreCloser-TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/A/bc19e12c8f644bcea03932e4830a60b9 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/A/bc19e12c8f644bcea03932e4830a60b9 2024-11-23T15:25:48,135 DEBUG [StoreCloser-TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/A/14acc8db6f934ef98fb7f2cfc7af96eb to 
hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/A/14acc8db6f934ef98fb7f2cfc7af96eb 2024-11-23T15:25:48,136 DEBUG [StoreCloser-TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/A/635e4ceba9af4ec6b7975dc9f40ae841 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/A/635e4ceba9af4ec6b7975dc9f40ae841 2024-11-23T15:25:48,137 DEBUG [StoreCloser-TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/A/d16e211eb5f34fb884225d5d17da3a6d to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/A/d16e211eb5f34fb884225d5d17da3a6d 2024-11-23T15:25:48,138 DEBUG [StoreCloser-TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/A/634818014909485f940c4e9f1040b646 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/A/634818014909485f940c4e9f1040b646 2024-11-23T15:25:48,138 DEBUG [StoreCloser-TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/A/fec9ca9a317a419bbb3e91e1dfd75cbc to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/A/fec9ca9a317a419bbb3e91e1dfd75cbc 2024-11-23T15:25:48,139 DEBUG [StoreCloser-TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/A/c902ab83a35b445d93cf6dc5a2933a9e to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/A/c902ab83a35b445d93cf6dc5a2933a9e 2024-11-23T15:25:48,140 DEBUG [StoreCloser-TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/A/2496cdcaeebe4585a426f7619992bd2b to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/A/2496cdcaeebe4585a426f7619992bd2b 2024-11-23T15:25:48,141 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/A/9a5b12809d764449bf2c06c2c1fc4b7b to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/A/9a5b12809d764449bf2c06c2c1fc4b7b 2024-11-23T15:25:48,142 DEBUG [StoreCloser-TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/A/144a8899ba6e45b392d6e74e13cee065 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/A/144a8899ba6e45b392d6e74e13cee065 2024-11-23T15:25:48,143 DEBUG [StoreCloser-TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/A/03abbc3e731141749396df296f428f07 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/A/03abbc3e731141749396df296f428f07 2024-11-23T15:25:48,143 DEBUG [StoreCloser-TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/A/e4c2757c816c423988403a7d6a897c2f to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/A/e4c2757c816c423988403a7d6a897c2f 2024-11-23T15:25:48,144 DEBUG [StoreCloser-TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/A/276e28eb455e425d9752dd28c05475eb to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/A/276e28eb455e425d9752dd28c05475eb 2024-11-23T15:25:48,145 DEBUG [StoreCloser-TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/A/b7f05a9f4aad46bebadd8049a1c33bd2 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/A/b7f05a9f4aad46bebadd8049a1c33bd2 2024-11-23T15:25:48,146 DEBUG [StoreCloser-TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/A/990b316cf8ac4e1fb6743fc21c5de879 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/A/990b316cf8ac4e1fb6743fc21c5de879 2024-11-23T15:25:48,147 DEBUG [StoreCloser-TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/A/626ca3607b48460a929e5e9c684047d1 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/A/626ca3607b48460a929e5e9c684047d1 2024-11-23T15:25:48,148 DEBUG [StoreCloser-TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/B/6f1b520ae3b24fefb63310ad64a7f53b, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/B/6865cac2ffb64975a15a7da0d8e17daa, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/B/94ab22e6072547788dec48f4df2816ec, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/B/1280dfa1fbf94cefb0b5156751c39f72, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/B/3c25d53b551b4d3f8f15281f77ac5470, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/B/28b3b1c023234af380514983526dacb5, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/B/a460fe83f4984283861e4dd2fb635672, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/B/a334d2f1142847e390f2665b774f2d2d, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/B/24708821b2034c5383a793e312eea481, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/B/139b1e9aa79e4bc986435b489d23bf98, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/B/b4cbeda552974e8dabdeb0448f380148, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/B/8c8a2a64a8d04ff4b196fb0a46aeff33, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/B/988eae2626af4a68b3e6ffe279fb02ec, 
hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/B/eb1d749540f145f0af34a381118faabb, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/B/ff01e6cd65b84bab88f0c1d0d4d61307, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/B/e7678d040498413fa36b0dc5b313bf79, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/B/c6cfaf421727447285d7c06b37807313, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/B/184630c51d8d41cf8144cb9cc3c182c7, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/B/145148173f4d4a9d9d41144d20db8be8, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/B/b92bea8c4d654978881121c9b36a9063, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/B/b359022cd91d42c4a9f322729673cff0, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/B/ec971a2d645542ef8ecb35dbe969ee71, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/B/52e42c5ee1f546f4bb4e2a177a486961, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/B/fadf44dd7f654ea2b88589a03c7c6226, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/B/b640ad3b3c174be6a4eb79acfab6043d, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/B/576ea058ed5444e0a7f0bf23bcc1dc1e, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/B/c01e00ae99b64d58bbaf9b8c9330af91] to archive 2024-11-23T15:25:48,149 DEBUG [StoreCloser-TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-23T15:25:48,150 DEBUG [StoreCloser-TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/B/6f1b520ae3b24fefb63310ad64a7f53b to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/B/6f1b520ae3b24fefb63310ad64a7f53b 2024-11-23T15:25:48,151 DEBUG [StoreCloser-TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/B/6865cac2ffb64975a15a7da0d8e17daa to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/B/6865cac2ffb64975a15a7da0d8e17daa 2024-11-23T15:25:48,152 DEBUG [StoreCloser-TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/B/94ab22e6072547788dec48f4df2816ec to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/B/94ab22e6072547788dec48f4df2816ec 2024-11-23T15:25:48,153 DEBUG [StoreCloser-TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/B/1280dfa1fbf94cefb0b5156751c39f72 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/B/1280dfa1fbf94cefb0b5156751c39f72 2024-11-23T15:25:48,154 DEBUG [StoreCloser-TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/B/3c25d53b551b4d3f8f15281f77ac5470 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/B/3c25d53b551b4d3f8f15281f77ac5470 2024-11-23T15:25:48,154 DEBUG [StoreCloser-TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/B/28b3b1c023234af380514983526dacb5 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/B/28b3b1c023234af380514983526dacb5 2024-11-23T15:25:48,155 DEBUG [StoreCloser-TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/B/a460fe83f4984283861e4dd2fb635672 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/B/a460fe83f4984283861e4dd2fb635672 2024-11-23T15:25:48,156 DEBUG [StoreCloser-TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/B/a334d2f1142847e390f2665b774f2d2d to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/B/a334d2f1142847e390f2665b774f2d2d 2024-11-23T15:25:48,157 DEBUG [StoreCloser-TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/B/24708821b2034c5383a793e312eea481 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/B/24708821b2034c5383a793e312eea481 2024-11-23T15:25:48,158 DEBUG [StoreCloser-TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/B/139b1e9aa79e4bc986435b489d23bf98 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/B/139b1e9aa79e4bc986435b489d23bf98 2024-11-23T15:25:48,159 DEBUG [StoreCloser-TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/B/b4cbeda552974e8dabdeb0448f380148 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/B/b4cbeda552974e8dabdeb0448f380148 2024-11-23T15:25:48,160 DEBUG [StoreCloser-TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/B/8c8a2a64a8d04ff4b196fb0a46aeff33 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/B/8c8a2a64a8d04ff4b196fb0a46aeff33 2024-11-23T15:25:48,162 DEBUG [StoreCloser-TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/B/988eae2626af4a68b3e6ffe279fb02ec to 
hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/B/988eae2626af4a68b3e6ffe279fb02ec 2024-11-23T15:25:48,163 DEBUG [StoreCloser-TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/B/eb1d749540f145f0af34a381118faabb to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/B/eb1d749540f145f0af34a381118faabb 2024-11-23T15:25:48,164 DEBUG [StoreCloser-TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/B/ff01e6cd65b84bab88f0c1d0d4d61307 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/B/ff01e6cd65b84bab88f0c1d0d4d61307 2024-11-23T15:25:48,165 DEBUG [StoreCloser-TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/B/e7678d040498413fa36b0dc5b313bf79 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/B/e7678d040498413fa36b0dc5b313bf79 2024-11-23T15:25:48,166 DEBUG [StoreCloser-TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/B/c6cfaf421727447285d7c06b37807313 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/B/c6cfaf421727447285d7c06b37807313 2024-11-23T15:25:48,168 DEBUG [StoreCloser-TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/B/184630c51d8d41cf8144cb9cc3c182c7 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/B/184630c51d8d41cf8144cb9cc3c182c7 2024-11-23T15:25:48,169 DEBUG [StoreCloser-TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/B/145148173f4d4a9d9d41144d20db8be8 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/B/145148173f4d4a9d9d41144d20db8be8 2024-11-23T15:25:48,170 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/B/b92bea8c4d654978881121c9b36a9063 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/B/b92bea8c4d654978881121c9b36a9063 2024-11-23T15:25:48,171 DEBUG [StoreCloser-TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/B/b359022cd91d42c4a9f322729673cff0 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/B/b359022cd91d42c4a9f322729673cff0 2024-11-23T15:25:48,172 DEBUG [StoreCloser-TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/B/ec971a2d645542ef8ecb35dbe969ee71 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/B/ec971a2d645542ef8ecb35dbe969ee71 2024-11-23T15:25:48,173 DEBUG [StoreCloser-TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/B/52e42c5ee1f546f4bb4e2a177a486961 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/B/52e42c5ee1f546f4bb4e2a177a486961 2024-11-23T15:25:48,174 DEBUG [StoreCloser-TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/B/fadf44dd7f654ea2b88589a03c7c6226 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/B/fadf44dd7f654ea2b88589a03c7c6226 2024-11-23T15:25:48,175 DEBUG [StoreCloser-TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/B/b640ad3b3c174be6a4eb79acfab6043d to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/B/b640ad3b3c174be6a4eb79acfab6043d 2024-11-23T15:25:48,176 DEBUG [StoreCloser-TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/B/576ea058ed5444e0a7f0bf23bcc1dc1e to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/B/576ea058ed5444e0a7f0bf23bcc1dc1e 2024-11-23T15:25:48,177 DEBUG [StoreCloser-TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/B/c01e00ae99b64d58bbaf9b8c9330af91 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/B/c01e00ae99b64d58bbaf9b8c9330af91 2024-11-23T15:25:48,178 DEBUG [StoreCloser-TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/C/155d8df9687249f580bae119913a125c, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/C/33c7f76f0e7546f9a3ac1433e8904f27, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/C/61b879b52b574ca79e22eae226faee54, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/C/636abcafa81d4c3389223cc8ec33b52a, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/C/cb87a155e37f47aa8508ee4c7003df8b, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/C/c5d0a09527d0467188e2459a2245dd24, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/C/97c39e6784a64f43a34279a8a1795576, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/C/9d9a0ec19bf147c485d18e3ee53fe2aa, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/C/70e4bb4f9c864257a6b86d3f21cb1ae2, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/C/29973944ec6b496589f5f334bd48a8d5, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/C/6a7ee4a7436e43989affdd155ba6f8bb, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/C/f20ead99a46a4620b172f445d6faa565, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/C/9cf24856301b4f2184f945a9996a1439, 
hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/C/9fb15d5ef3864ef599eb3305a5476fd3, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/C/f27fd66f2d5f40e78c4e321b4fedc0c4, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/C/056c93d9f50942e8a697a059def46fd0, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/C/c71ad14ca7ef4bc48a94ef6a64ea1f9b, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/C/a420de9683be4bc9bdd8864a942ee7fa, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/C/2e55a9cd1bdf4280b4075e2519b73f4c, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/C/0e53fce3930c41e48ef8dba589b52f9d, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/C/dc38221e8be441bc9a1ec03e2a78405a, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/C/eb5699146a4f46cfa82b86aa1b534c02, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/C/fea058fd0b3048b7b14dbc53a6068fd7, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/C/bf2fc92605a7444fa04b7c6e191f1a9e, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/C/7243fcf69ebc49869b2d6172054115fa, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/C/310eb3cc70184e11af04507d1e017662] to archive 2024-11-23T15:25:48,179 DEBUG [StoreCloser-TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-23T15:25:48,180 DEBUG [StoreCloser-TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/C/155d8df9687249f580bae119913a125c to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/C/155d8df9687249f580bae119913a125c 2024-11-23T15:25:48,181 DEBUG [StoreCloser-TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/C/33c7f76f0e7546f9a3ac1433e8904f27 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/C/33c7f76f0e7546f9a3ac1433e8904f27 2024-11-23T15:25:48,183 DEBUG [StoreCloser-TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/C/61b879b52b574ca79e22eae226faee54 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/C/61b879b52b574ca79e22eae226faee54 2024-11-23T15:25:48,184 DEBUG [StoreCloser-TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/C/636abcafa81d4c3389223cc8ec33b52a to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/C/636abcafa81d4c3389223cc8ec33b52a 2024-11-23T15:25:48,185 DEBUG [StoreCloser-TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/C/cb87a155e37f47aa8508ee4c7003df8b to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/C/cb87a155e37f47aa8508ee4c7003df8b 2024-11-23T15:25:48,186 DEBUG [StoreCloser-TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/C/c5d0a09527d0467188e2459a2245dd24 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/C/c5d0a09527d0467188e2459a2245dd24 2024-11-23T15:25:48,187 DEBUG [StoreCloser-TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/C/97c39e6784a64f43a34279a8a1795576 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/C/97c39e6784a64f43a34279a8a1795576 2024-11-23T15:25:48,188 DEBUG [StoreCloser-TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/C/9d9a0ec19bf147c485d18e3ee53fe2aa to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/C/9d9a0ec19bf147c485d18e3ee53fe2aa 2024-11-23T15:25:48,189 DEBUG [StoreCloser-TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/C/70e4bb4f9c864257a6b86d3f21cb1ae2 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/C/70e4bb4f9c864257a6b86d3f21cb1ae2 2024-11-23T15:25:48,191 DEBUG [StoreCloser-TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/C/29973944ec6b496589f5f334bd48a8d5 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/C/29973944ec6b496589f5f334bd48a8d5 2024-11-23T15:25:48,192 DEBUG [StoreCloser-TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/C/6a7ee4a7436e43989affdd155ba6f8bb to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/C/6a7ee4a7436e43989affdd155ba6f8bb 2024-11-23T15:25:48,193 DEBUG [StoreCloser-TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/C/f20ead99a46a4620b172f445d6faa565 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/C/f20ead99a46a4620b172f445d6faa565 2024-11-23T15:25:48,195 DEBUG [StoreCloser-TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/C/9cf24856301b4f2184f945a9996a1439 to 
hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/C/9cf24856301b4f2184f945a9996a1439 2024-11-23T15:25:48,196 DEBUG [StoreCloser-TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/C/9fb15d5ef3864ef599eb3305a5476fd3 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/C/9fb15d5ef3864ef599eb3305a5476fd3 2024-11-23T15:25:48,197 DEBUG [StoreCloser-TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/C/f27fd66f2d5f40e78c4e321b4fedc0c4 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/C/f27fd66f2d5f40e78c4e321b4fedc0c4 2024-11-23T15:25:48,198 DEBUG [StoreCloser-TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/C/056c93d9f50942e8a697a059def46fd0 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/C/056c93d9f50942e8a697a059def46fd0 2024-11-23T15:25:48,199 DEBUG [StoreCloser-TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/C/c71ad14ca7ef4bc48a94ef6a64ea1f9b to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/C/c71ad14ca7ef4bc48a94ef6a64ea1f9b 2024-11-23T15:25:48,200 DEBUG [StoreCloser-TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/C/a420de9683be4bc9bdd8864a942ee7fa to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/C/a420de9683be4bc9bdd8864a942ee7fa 2024-11-23T15:25:48,201 DEBUG [StoreCloser-TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/C/2e55a9cd1bdf4280b4075e2519b73f4c to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/C/2e55a9cd1bdf4280b4075e2519b73f4c 2024-11-23T15:25:48,203 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/C/0e53fce3930c41e48ef8dba589b52f9d to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/C/0e53fce3930c41e48ef8dba589b52f9d 2024-11-23T15:25:48,204 DEBUG [StoreCloser-TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/C/dc38221e8be441bc9a1ec03e2a78405a to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/C/dc38221e8be441bc9a1ec03e2a78405a 2024-11-23T15:25:48,205 DEBUG [StoreCloser-TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/C/eb5699146a4f46cfa82b86aa1b534c02 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/C/eb5699146a4f46cfa82b86aa1b534c02 2024-11-23T15:25:48,206 DEBUG [StoreCloser-TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/C/fea058fd0b3048b7b14dbc53a6068fd7 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/C/fea058fd0b3048b7b14dbc53a6068fd7 2024-11-23T15:25:48,206 DEBUG [StoreCloser-TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/C/bf2fc92605a7444fa04b7c6e191f1a9e to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/C/bf2fc92605a7444fa04b7c6e191f1a9e 2024-11-23T15:25:48,208 DEBUG [StoreCloser-TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/C/7243fcf69ebc49869b2d6172054115fa to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/C/7243fcf69ebc49869b2d6172054115fa 2024-11-23T15:25:48,209 DEBUG [StoreCloser-TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/C/310eb3cc70184e11af04507d1e017662 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/C/310eb3cc70184e11af04507d1e017662 2024-11-23T15:25:48,213 DEBUG [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/recovered.edits/427.seqid, newMaxSeqId=427, maxSeqId=1 2024-11-23T15:25:48,214 INFO [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393. 2024-11-23T15:25:48,214 DEBUG [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegion(1635): Region close journal for 9d175c2ab5829c897c27b4bef55dd393: 2024-11-23T15:25:48,215 INFO [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] handler.UnassignRegionHandler(170): Closed 9d175c2ab5829c897c27b4bef55dd393 2024-11-23T15:25:48,216 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=95 updating hbase:meta row=9d175c2ab5829c897c27b4bef55dd393, regionState=CLOSED 2024-11-23T15:25:48,218 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=96, resume processing ppid=95 2024-11-23T15:25:48,218 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=96, ppid=95, state=SUCCESS; CloseRegionProcedure 9d175c2ab5829c897c27b4bef55dd393, server=6a36843bf905,33811,1732375456985 in 1.4970 sec 2024-11-23T15:25:48,219 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=95, resume processing ppid=94 2024-11-23T15:25:48,219 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=95, ppid=94, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=9d175c2ab5829c897c27b4bef55dd393, UNASSIGN in 1.4990 sec 2024-11-23T15:25:48,220 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=94, resume processing ppid=93 2024-11-23T15:25:48,221 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=94, ppid=93, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.5010 sec 2024-11-23T15:25:48,222 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732375548221"}]},"ts":"1732375548221"} 2024-11-23T15:25:48,222 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-11-23T15:25:48,225 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-11-23T15:25:48,227 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=93, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.5140 sec 2024-11-23T15:25:48,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=93 2024-11-23T15:25:48,818 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: 
default:TestAcidGuarantees, procId: 93 completed 2024-11-23T15:25:48,818 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-11-23T15:25:48,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] procedure2.ProcedureExecutor(1098): Stored pid=97, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-11-23T15:25:48,820 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=97, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-23T15:25:48,820 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=97, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-23T15:25:48,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=97 2024-11-23T15:25:48,822 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393 2024-11-23T15:25:48,824 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/A, FileablePath, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/B, FileablePath, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/C, FileablePath, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/recovered.edits] 2024-11-23T15:25:48,826 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/A/27325121496e4b378294a3baa3f55ddd to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/A/27325121496e4b378294a3baa3f55ddd 2024-11-23T15:25:48,827 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/A/a79990431e2a4947ab7d26c262d057f6 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/A/a79990431e2a4947ab7d26c262d057f6 2024-11-23T15:25:48,828 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/A/f569db4a67be4a4baa81d258cc88745e to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/A/f569db4a67be4a4baa81d258cc88745e 2024-11-23T15:25:48,830 DEBUG 
[HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/B/65ba9456359d45eca7656c30c12eace2 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/B/65ba9456359d45eca7656c30c12eace2 2024-11-23T15:25:48,832 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/B/a03b245b798747f6aa50e8eaa503d74e to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/B/a03b245b798747f6aa50e8eaa503d74e 2024-11-23T15:25:48,833 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/B/a73402bc85d544b2a76211d2b480dbf3 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/B/a73402bc85d544b2a76211d2b480dbf3 2024-11-23T15:25:48,835 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/C/3c47635a45434f29a72144849d4cae90 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/C/3c47635a45434f29a72144849d4cae90 2024-11-23T15:25:48,836 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/C/5324e910a8274e20a813ab253ccbfd98 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/C/5324e910a8274e20a813ab253ccbfd98 2024-11-23T15:25:48,837 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/C/e9be86cfcf9946d29cda4943916215f3 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/C/e9be86cfcf9946d29cda4943916215f3 2024-11-23T15:25:48,839 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/recovered.edits/427.seqid to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393/recovered.edits/427.seqid 2024-11-23T15:25:48,839 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(610): Deleted 
hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/9d175c2ab5829c897c27b4bef55dd393 2024-11-23T15:25:48,840 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-11-23T15:25:48,841 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=97, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-23T15:25:48,846 WARN [PEWorker-5 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-11-23T15:25:48,847 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 2024-11-23T15:25:48,848 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=97, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-23T15:25:48,848 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 2024-11-23T15:25:48,849 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732375548848"}]},"ts":"9223372036854775807"} 2024-11-23T15:25:48,850 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-11-23T15:25:48,850 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 9d175c2ab5829c897c27b4bef55dd393, NAME => 'TestAcidGuarantees,,1732375521509.9d175c2ab5829c897c27b4bef55dd393.', STARTKEY => '', ENDKEY => ''}] 2024-11-23T15:25:48,850 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 
2024-11-23T15:25:48,850 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732375548850"}]},"ts":"9223372036854775807"} 2024-11-23T15:25:48,851 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-11-23T15:25:48,854 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(133): Finished pid=97, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-23T15:25:48,854 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=97, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 36 msec 2024-11-23T15:25:48,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=97 2024-11-23T15:25:48,921 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 97 completed 2024-11-23T15:25:48,930 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testGetAtomicity Thread=238 (was 238), OpenFileDescriptor=451 (was 455), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=273 (was 276), ProcessCount=11 (was 11), AvailableMemoryMB=3728 (was 3784) 2024-11-23T15:25:48,939 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testMobScanAtomicity Thread=238, OpenFileDescriptor=451, MaxFileDescriptor=1048576, SystemLoadAverage=273, ProcessCount=11, AvailableMemoryMB=3727 2024-11-23T15:25:48,940 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
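[editor's note] The DISABLE (procId 93) and DELETE (procId 97) operations reported as completed above are an ordinary client-side teardown between test cases. A minimal sketch of that sequence, assuming the HBase 2.x Admin interface; the helper name and structure are illustrative, not taken from the test source.

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;

    public class DropTableSketch {
        // Disable first, then delete; the master runs DisableTableProcedure and
        // DeleteTableProcedure, and the delete archives the remaining region files
        // as seen in the HFileArchiver entries above.
        static void dropIfPresent(Admin admin) throws IOException {
            TableName name = TableName.valueOf("TestAcidGuarantees");
            if (admin.tableExists(name)) {
                if (admin.isTableEnabled(name)) {
                    admin.disableTable(name);
                }
                admin.deleteTable(name);
            }
        }
    }
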
2024-11-23T15:25:48,940 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-23T15:25:48,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] procedure2.ProcedureExecutor(1098): Stored pid=98, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-11-23T15:25:48,942 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=98, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-11-23T15:25:48,942 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:48,942 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 98 2024-11-23T15:25:48,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=98 2024-11-23T15:25:48,942 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=98, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-23T15:25:48,947 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742129_1305 (size=963) 2024-11-23T15:25:49,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=98 2024-11-23T15:25:49,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=98 2024-11-23T15:25:49,349 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 1b51fcb9c5cc43364334a31573ca489e, NAME => 'TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704 2024-11-23T15:25:49,354 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742130_1306 (size=53) 2024-11-23T15:25:49,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=98 2024-11-23T15:25:49,755 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T15:25:49,755 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 1b51fcb9c5cc43364334a31573ca489e, disabling compactions & flushes 2024-11-23T15:25:49,755 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e. 2024-11-23T15:25:49,755 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e. 2024-11-23T15:25:49,755 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e. after waiting 0 ms 2024-11-23T15:25:49,755 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e. 2024-11-23T15:25:49,755 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e. 
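[editor's note] The CreateTableProcedure above (pid=98) was triggered by a client request for a TestAcidGuarantees table with ADAPTIVE in-memory compaction and three column families A, B and C, each keeping a single version. A minimal sketch of how such a request could be issued with the HBase 2.x client API; the class name, connection handling and the explicit flush-size line are assumptions for illustration, not code from TestAcidGuaranteesWithAdaptivePolicy.

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateAdaptiveTableSketch {
        public static void main(String[] args) throws IOException {
            Configuration conf = HBaseConfiguration.create();
            // Assumption: mirrors the 131072-byte value flagged by the
            // TableDescriptorChecker warning above; the test may set it elsewhere.
            conf.setLong("hbase.hregion.memstore.flush.size", 131072L);
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                TableDescriptorBuilder table = TableDescriptorBuilder
                    .newBuilder(TableName.valueOf("TestAcidGuarantees"))
                    // Table metadata shown in the log: ADAPTIVE compacting memstore.
                    .setValue("hbase.hregion.compacting.memstore.type", "ADAPTIVE");
                // Families A, B, C with VERSIONS => '1', as in the descriptor above;
                // the remaining attributes printed in the log are defaults.
                for (String family : new String[] {"A", "B", "C"}) {
                    table.setColumnFamily(ColumnFamilyDescriptorBuilder
                        .newBuilder(Bytes.toBytes(family))
                        .setMaxVersions(1)
                        .build());
                }
                admin.createTable(table.build());
            }
        }
    }
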
2024-11-23T15:25:49,755 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 1b51fcb9c5cc43364334a31573ca489e: 2024-11-23T15:25:49,756 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=98, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-11-23T15:25:49,756 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1732375549756"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732375549756"}]},"ts":"1732375549756"} 2024-11-23T15:25:49,757 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-11-23T15:25:49,758 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=98, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-23T15:25:49,758 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732375549758"}]},"ts":"1732375549758"} 2024-11-23T15:25:49,759 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-11-23T15:25:49,762 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=99, ppid=98, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=1b51fcb9c5cc43364334a31573ca489e, ASSIGN}] 2024-11-23T15:25:49,763 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=99, ppid=98, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=1b51fcb9c5cc43364334a31573ca489e, ASSIGN 2024-11-23T15:25:49,764 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=99, ppid=98, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=1b51fcb9c5cc43364334a31573ca489e, ASSIGN; state=OFFLINE, location=6a36843bf905,33811,1732375456985; forceNewPlan=false, retain=false 2024-11-23T15:25:49,914 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=99 updating hbase:meta row=1b51fcb9c5cc43364334a31573ca489e, regionState=OPENING, regionLocation=6a36843bf905,33811,1732375456985 2024-11-23T15:25:49,915 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=100, ppid=99, state=RUNNABLE; OpenRegionProcedure 1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985}] 2024-11-23T15:25:50,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=98 2024-11-23T15:25:50,066 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:25:50,069 INFO [RS_OPEN_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e. 
2024-11-23T15:25:50,069 DEBUG [RS_OPEN_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(7285): Opening region: {ENCODED => 1b51fcb9c5cc43364334a31573ca489e, NAME => 'TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.', STARTKEY => '', ENDKEY => ''} 2024-11-23T15:25:50,069 DEBUG [RS_OPEN_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 1b51fcb9c5cc43364334a31573ca489e 2024-11-23T15:25:50,070 DEBUG [RS_OPEN_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T15:25:50,070 DEBUG [RS_OPEN_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(7327): checking encryption for 1b51fcb9c5cc43364334a31573ca489e 2024-11-23T15:25:50,070 DEBUG [RS_OPEN_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(7330): checking classloading for 1b51fcb9c5cc43364334a31573ca489e 2024-11-23T15:25:50,071 INFO [StoreOpener-1b51fcb9c5cc43364334a31573ca489e-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 1b51fcb9c5cc43364334a31573ca489e 2024-11-23T15:25:50,072 INFO [StoreOpener-1b51fcb9c5cc43364334a31573ca489e-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-23T15:25:50,072 INFO [StoreOpener-1b51fcb9c5cc43364334a31573ca489e-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1b51fcb9c5cc43364334a31573ca489e columnFamilyName A 2024-11-23T15:25:50,072 DEBUG [StoreOpener-1b51fcb9c5cc43364334a31573ca489e-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:50,073 INFO [StoreOpener-1b51fcb9c5cc43364334a31573ca489e-1 {}] regionserver.HStore(327): Store=1b51fcb9c5cc43364334a31573ca489e/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T15:25:50,073 INFO [StoreOpener-1b51fcb9c5cc43364334a31573ca489e-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 1b51fcb9c5cc43364334a31573ca489e 2024-11-23T15:25:50,073 INFO [StoreOpener-1b51fcb9c5cc43364334a31573ca489e-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-23T15:25:50,074 INFO [StoreOpener-1b51fcb9c5cc43364334a31573ca489e-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1b51fcb9c5cc43364334a31573ca489e columnFamilyName B 2024-11-23T15:25:50,074 DEBUG [StoreOpener-1b51fcb9c5cc43364334a31573ca489e-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:50,074 INFO [StoreOpener-1b51fcb9c5cc43364334a31573ca489e-1 {}] regionserver.HStore(327): Store=1b51fcb9c5cc43364334a31573ca489e/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T15:25:50,074 INFO [StoreOpener-1b51fcb9c5cc43364334a31573ca489e-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 1b51fcb9c5cc43364334a31573ca489e 2024-11-23T15:25:50,075 INFO [StoreOpener-1b51fcb9c5cc43364334a31573ca489e-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-23T15:25:50,075 INFO [StoreOpener-1b51fcb9c5cc43364334a31573ca489e-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1b51fcb9c5cc43364334a31573ca489e columnFamilyName C 2024-11-23T15:25:50,075 DEBUG [StoreOpener-1b51fcb9c5cc43364334a31573ca489e-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:50,075 INFO [StoreOpener-1b51fcb9c5cc43364334a31573ca489e-1 {}] regionserver.HStore(327): Store=1b51fcb9c5cc43364334a31573ca489e/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T15:25:50,075 INFO [RS_OPEN_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e. 2024-11-23T15:25:50,076 DEBUG [RS_OPEN_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e 2024-11-23T15:25:50,076 DEBUG [RS_OPEN_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e 2024-11-23T15:25:50,077 DEBUG [RS_OPEN_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-23T15:25:50,078 DEBUG [RS_OPEN_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(1085): writing seq id for 1b51fcb9c5cc43364334a31573ca489e 2024-11-23T15:25:50,079 DEBUG [RS_OPEN_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-23T15:25:50,080 INFO [RS_OPEN_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(1102): Opened 1b51fcb9c5cc43364334a31573ca489e; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=73115381, jitterRate=0.08950407803058624}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-23T15:25:50,080 DEBUG [RS_OPEN_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(1001): Region open journal for 1b51fcb9c5cc43364334a31573ca489e: 2024-11-23T15:25:50,081 INFO [RS_OPEN_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e., pid=100, masterSystemTime=1732375550066 2024-11-23T15:25:50,082 DEBUG [RS_OPEN_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e. 2024-11-23T15:25:50,082 INFO [RS_OPEN_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e. 
2024-11-23T15:25:50,082 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=99 updating hbase:meta row=1b51fcb9c5cc43364334a31573ca489e, regionState=OPEN, openSeqNum=2, regionLocation=6a36843bf905,33811,1732375456985 2024-11-23T15:25:50,084 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=100, resume processing ppid=99 2024-11-23T15:25:50,084 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=100, ppid=99, state=SUCCESS; OpenRegionProcedure 1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 in 168 msec 2024-11-23T15:25:50,085 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=99, resume processing ppid=98 2024-11-23T15:25:50,085 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=99, ppid=98, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=1b51fcb9c5cc43364334a31573ca489e, ASSIGN in 322 msec 2024-11-23T15:25:50,086 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=98, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-23T15:25:50,086 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732375550086"}]},"ts":"1732375550086"} 2024-11-23T15:25:50,087 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-11-23T15:25:50,089 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=98, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-11-23T15:25:50,090 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=98, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.1490 sec 2024-11-23T15:25:51,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=98 2024-11-23T15:25:51,046 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 98 completed 2024-11-23T15:25:51,048 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2b976e1a to 127.0.0.1:62881 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1df61dc9 2024-11-23T15:25:51,052 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5fe71801, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T15:25:51,053 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T15:25:51,054 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51324, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T15:25:51,055 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-23T15:25:51,055 INFO [RS-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42884, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-23T15:25:51,057 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-11-23T15:25:51,057 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster$14(2798): Client=jenkins//172.17.0.2 modify table TestAcidGuarantees from 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} to 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '4', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-23T15:25:51,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] procedure2.ProcedureExecutor(1098): Stored pid=101, state=RUNNABLE:MODIFY_TABLE_PREPARE; ModifyTableProcedure table=TestAcidGuarantees 2024-11-23T15:25:51,065 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742131_1307 (size=999) 2024-11-23T15:25:51,467 DEBUG [PEWorker-2 {}] util.FSTableDescriptors(519): Deleted hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000001.963 2024-11-23T15:25:51,467 INFO [PEWorker-2 {}] util.FSTableDescriptors(297): Updated 
tableinfo=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000002.999 2024-11-23T15:25:51,469 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=102, ppid=101, state=RUNNABLE:REOPEN_TABLE_REGIONS_GET_REGIONS; ReopenTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-23T15:25:51,471 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=103, ppid=102, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=1b51fcb9c5cc43364334a31573ca489e, REOPEN/MOVE}] 2024-11-23T15:25:51,471 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=103, ppid=102, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=1b51fcb9c5cc43364334a31573ca489e, REOPEN/MOVE 2024-11-23T15:25:51,472 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=103 updating hbase:meta row=1b51fcb9c5cc43364334a31573ca489e, regionState=CLOSING, regionLocation=6a36843bf905,33811,1732375456985 2024-11-23T15:25:51,473 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-23T15:25:51,473 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=104, ppid=103, state=RUNNABLE; CloseRegionProcedure 1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985}] 2024-11-23T15:25:51,624 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:25:51,624 INFO [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=104}] handler.UnassignRegionHandler(124): Close 1b51fcb9c5cc43364334a31573ca489e 2024-11-23T15:25:51,624 DEBUG [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=104}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-23T15:25:51,624 DEBUG [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=104}] regionserver.HRegion(1681): Closing 1b51fcb9c5cc43364334a31573ca489e, disabling compactions & flushes 2024-11-23T15:25:51,624 INFO [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=104}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e. 2024-11-23T15:25:51,624 DEBUG [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=104}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e. 2024-11-23T15:25:51,624 DEBUG [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=104}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e. after waiting 0 ms 2024-11-23T15:25:51,624 DEBUG [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=104}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e. 
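The modify request logged above changes only column family 'A': it gains IS_MOB => 'true' and MOB_THRESHOLD => '4' while B and C keep their original settings, and the ReopenTableRegionsProcedure that follows reopens the region under the new descriptor. A hypothetical sketch of issuing the same change through the HBase 2.x Admin API is shown next; the wrapper class is illustrative, and the Admin handle is assumed to come from ConnectionFactory as in the earlier sketch.

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public final class EnableMobOnFamilyASketch {
  static void apply(Admin admin) throws IOException {
    TableName name = TableName.valueOf("TestAcidGuarantees");
    TableDescriptor current = admin.getDescriptor(name);
    // Rebuild only family 'A' with MOB enabled and a 4-byte threshold; B and C are left untouched.
    ColumnFamilyDescriptor a = ColumnFamilyDescriptorBuilder
        .newBuilder(current.getColumnFamily(Bytes.toBytes("A")))
        .setMobEnabled(true)        // IS_MOB => 'true'
        .setMobThreshold(4L)        // MOB_THRESHOLD => '4'
        .build();
    admin.modifyTable(TableDescriptorBuilder.newBuilder(current)
        .modifyColumnFamily(a)
        .build());                  // runs ModifyTableProcedure, then region reopen
  }
}

With a threshold of 4 bytes, any value in family 'A' larger than that is written to separate MOB files rather than ordinary store files; the d41d8cd98f00... file under mobdir/.tmp further down in the log is one such MOB write.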
2024-11-23T15:25:51,628 DEBUG [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=104}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/recovered.edits/4.seqid, newMaxSeqId=4, maxSeqId=1 2024-11-23T15:25:51,628 INFO [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=104}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e. 2024-11-23T15:25:51,629 DEBUG [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=104}] regionserver.HRegion(1635): Region close journal for 1b51fcb9c5cc43364334a31573ca489e: 2024-11-23T15:25:51,629 WARN [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=104}] regionserver.HRegionServer(3786): Not adding moved region record: 1b51fcb9c5cc43364334a31573ca489e to self. 2024-11-23T15:25:51,630 INFO [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=104}] handler.UnassignRegionHandler(170): Closed 1b51fcb9c5cc43364334a31573ca489e 2024-11-23T15:25:51,630 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=103 updating hbase:meta row=1b51fcb9c5cc43364334a31573ca489e, regionState=CLOSED 2024-11-23T15:25:51,632 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=104, resume processing ppid=103 2024-11-23T15:25:51,632 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=104, ppid=103, state=SUCCESS; CloseRegionProcedure 1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 in 158 msec 2024-11-23T15:25:51,632 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=103, ppid=102, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=1b51fcb9c5cc43364334a31573ca489e, REOPEN/MOVE; state=CLOSED, location=6a36843bf905,33811,1732375456985; forceNewPlan=false, retain=true 2024-11-23T15:25:51,783 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=103 updating hbase:meta row=1b51fcb9c5cc43364334a31573ca489e, regionState=OPENING, regionLocation=6a36843bf905,33811,1732375456985 2024-11-23T15:25:51,784 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=105, ppid=103, state=RUNNABLE; OpenRegionProcedure 1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985}] 2024-11-23T15:25:51,935 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:25:51,938 INFO [RS_OPEN_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_OPEN_REGION, pid=105}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e. 
2024-11-23T15:25:51,938 DEBUG [RS_OPEN_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_OPEN_REGION, pid=105}] regionserver.HRegion(7285): Opening region: {ENCODED => 1b51fcb9c5cc43364334a31573ca489e, NAME => 'TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.', STARTKEY => '', ENDKEY => ''} 2024-11-23T15:25:51,938 DEBUG [RS_OPEN_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_OPEN_REGION, pid=105}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 1b51fcb9c5cc43364334a31573ca489e 2024-11-23T15:25:51,938 DEBUG [RS_OPEN_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_OPEN_REGION, pid=105}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T15:25:51,939 DEBUG [RS_OPEN_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_OPEN_REGION, pid=105}] regionserver.HRegion(7327): checking encryption for 1b51fcb9c5cc43364334a31573ca489e 2024-11-23T15:25:51,939 DEBUG [RS_OPEN_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_OPEN_REGION, pid=105}] regionserver.HRegion(7330): checking classloading for 1b51fcb9c5cc43364334a31573ca489e 2024-11-23T15:25:51,940 INFO [StoreOpener-1b51fcb9c5cc43364334a31573ca489e-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 1b51fcb9c5cc43364334a31573ca489e 2024-11-23T15:25:51,940 INFO [StoreOpener-1b51fcb9c5cc43364334a31573ca489e-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-23T15:25:51,941 INFO [StoreOpener-1b51fcb9c5cc43364334a31573ca489e-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1b51fcb9c5cc43364334a31573ca489e columnFamilyName A 2024-11-23T15:25:51,942 DEBUG [StoreOpener-1b51fcb9c5cc43364334a31573ca489e-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:51,942 INFO [StoreOpener-1b51fcb9c5cc43364334a31573ca489e-1 {}] regionserver.HStore(327): Store=1b51fcb9c5cc43364334a31573ca489e/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T15:25:51,942 INFO [StoreOpener-1b51fcb9c5cc43364334a31573ca489e-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 1b51fcb9c5cc43364334a31573ca489e 2024-11-23T15:25:51,943 INFO [StoreOpener-1b51fcb9c5cc43364334a31573ca489e-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-23T15:25:51,943 INFO [StoreOpener-1b51fcb9c5cc43364334a31573ca489e-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1b51fcb9c5cc43364334a31573ca489e columnFamilyName B 2024-11-23T15:25:51,943 DEBUG [StoreOpener-1b51fcb9c5cc43364334a31573ca489e-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:51,943 INFO [StoreOpener-1b51fcb9c5cc43364334a31573ca489e-1 {}] regionserver.HStore(327): Store=1b51fcb9c5cc43364334a31573ca489e/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T15:25:51,943 INFO [StoreOpener-1b51fcb9c5cc43364334a31573ca489e-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 1b51fcb9c5cc43364334a31573ca489e 2024-11-23T15:25:51,944 INFO [StoreOpener-1b51fcb9c5cc43364334a31573ca489e-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-23T15:25:51,944 INFO [StoreOpener-1b51fcb9c5cc43364334a31573ca489e-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1b51fcb9c5cc43364334a31573ca489e columnFamilyName C 2024-11-23T15:25:51,944 DEBUG [StoreOpener-1b51fcb9c5cc43364334a31573ca489e-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:51,944 INFO [StoreOpener-1b51fcb9c5cc43364334a31573ca489e-1 {}] regionserver.HStore(327): Store=1b51fcb9c5cc43364334a31573ca489e/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T15:25:51,944 INFO [RS_OPEN_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_OPEN_REGION, pid=105}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e. 2024-11-23T15:25:51,945 DEBUG [RS_OPEN_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_OPEN_REGION, pid=105}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e 2024-11-23T15:25:51,946 DEBUG [RS_OPEN_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_OPEN_REGION, pid=105}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e 2024-11-23T15:25:51,947 DEBUG [RS_OPEN_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_OPEN_REGION, pid=105}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-23T15:25:51,948 DEBUG [RS_OPEN_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_OPEN_REGION, pid=105}] regionserver.HRegion(1085): writing seq id for 1b51fcb9c5cc43364334a31573ca489e 2024-11-23T15:25:51,949 INFO [RS_OPEN_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_OPEN_REGION, pid=105}] regionserver.HRegion(1102): Opened 1b51fcb9c5cc43364334a31573ca489e; next sequenceid=5; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=65022599, jitterRate=-0.03108777105808258}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-23T15:25:51,949 DEBUG [RS_OPEN_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_OPEN_REGION, pid=105}] regionserver.HRegion(1001): Region open journal for 1b51fcb9c5cc43364334a31573ca489e: 2024-11-23T15:25:51,950 INFO [RS_OPEN_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_OPEN_REGION, pid=105}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e., pid=105, masterSystemTime=1732375551935 2024-11-23T15:25:51,951 DEBUG [RS_OPEN_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_OPEN_REGION, pid=105}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e. 2024-11-23T15:25:51,952 INFO [RS_OPEN_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_OPEN_REGION, pid=105}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e. 
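From here the test fans out a batch of client connections and drives concurrent puts against the reopened region. Because the descriptor carries the deliberately tiny MEMSTORE_FLUSHSIZE warned about above (131072 bytes), the region's blocking limit is only 512 KB (128 KB times the default hbase.hregion.memstore.block.multiplier of 4), and the handlers below start rejecting mutations with RegionTooBusyException while MemStoreFlusher catches up. The stock HBase client normally retries RegionTooBusyException on its own, since it is a retriable IOException; the helper below is only a hypothetical sketch of a manual backoff around Table.put, with made-up class name, retry count and sleep values.

import java.io.IOException;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;

public final class BusyRegionRetrySketch {
  /** Retry a put a few times when the region reports its memstore is over the blocking limit. */
  static void putWithBackoff(Table table, Put put) throws IOException, InterruptedException {
    long sleepMs = 50;                        // arbitrary starting backoff
    for (int attempt = 0; attempt < 5; attempt++) {
      try {
        table.put(put);                       // same Mutate path as the rejected calls in the log
        return;
      } catch (RegionTooBusyException e) {    // thrown from HRegion.checkResources, as in the traces below
        Thread.sleep(sleepMs);                // give MemStoreFlusher time to flush the region
        sleepMs *= 2;
      }
    }
    throw new IOException("region still too busy after retries");
  }
}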
2024-11-23T15:25:51,952 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=103 updating hbase:meta row=1b51fcb9c5cc43364334a31573ca489e, regionState=OPEN, openSeqNum=5, regionLocation=6a36843bf905,33811,1732375456985 2024-11-23T15:25:51,954 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=105, resume processing ppid=103 2024-11-23T15:25:51,954 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=105, ppid=103, state=SUCCESS; OpenRegionProcedure 1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 in 169 msec 2024-11-23T15:25:51,956 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=103, resume processing ppid=102 2024-11-23T15:25:51,956 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=103, ppid=102, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=1b51fcb9c5cc43364334a31573ca489e, REOPEN/MOVE in 483 msec 2024-11-23T15:25:51,957 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=102, resume processing ppid=101 2024-11-23T15:25:51,958 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=102, ppid=101, state=SUCCESS; ReopenTableRegionsProcedure table=TestAcidGuarantees in 487 msec 2024-11-23T15:25:51,959 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=101, state=SUCCESS; ModifyTableProcedure table=TestAcidGuarantees in 901 msec 2024-11-23T15:25:51,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=101 2024-11-23T15:25:51,961 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1b82ba2a to 127.0.0.1:62881 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3637e4c6 2024-11-23T15:25:51,967 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@51f7d511, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T15:25:51,968 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7b6cf8cb to 127.0.0.1:62881 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@72f422b4 2024-11-23T15:25:51,971 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1dc42ea6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T15:25:51,972 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7ec15031 to 127.0.0.1:62881 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2df33cdf 2024-11-23T15:25:51,977 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@117e86d9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T15:25:51,978 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 
0x3dd5b441 to 127.0.0.1:62881 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@9f472e0 2024-11-23T15:25:51,982 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6cd96549, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T15:25:51,983 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3c336ea4 to 127.0.0.1:62881 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@167a78b0 2024-11-23T15:25:51,986 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@31aea41b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T15:25:51,987 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5aee939b to 127.0.0.1:62881 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1e247aa1 2024-11-23T15:25:51,990 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@801ba40, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T15:25:51,990 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1f49665c to 127.0.0.1:62881 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2205f666 2024-11-23T15:25:51,995 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@27539bdc, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T15:25:51,995 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x683f8469 to 127.0.0.1:62881 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6584e9ce 2024-11-23T15:25:52,000 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5e3203d9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T15:25:52,001 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x75e4d3d0 to 127.0.0.1:62881 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@37ec8e3b 2024-11-23T15:25:52,007 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@798e7fd4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T15:25:52,007 DEBUG 
[Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2b308f62 to 127.0.0.1:62881 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@787e5169 2024-11-23T15:25:52,010 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7284f16d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T15:25:52,016 DEBUG [hconnection-0x5322bd5f-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T15:25:52,016 DEBUG [hconnection-0x1900156e-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T15:25:52,016 DEBUG [hconnection-0x190e9e00-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T15:25:52,017 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51342, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T15:25:52,017 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51334, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T15:25:52,018 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51328, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T15:25:52,018 DEBUG [hconnection-0x5f12489e-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T15:25:52,019 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51348, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T15:25:52,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(8581): Flush requested on 1b51fcb9c5cc43364334a31573ca489e 2024-11-23T15:25:52,029 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 1b51fcb9c5cc43364334a31573ca489e 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-23T15:25:52,029 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1b51fcb9c5cc43364334a31573ca489e, store=A 2024-11-23T15:25:52,029 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:25:52,029 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1b51fcb9c5cc43364334a31573ca489e, store=B 2024-11-23T15:25:52,029 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:25:52,029 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1b51fcb9c5cc43364334a31573ca489e, store=C 2024-11-23T15:25:52,029 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:25:52,030 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-23T15:25:52,030 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] procedure2.ProcedureExecutor(1098): Stored pid=106, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=106, table=TestAcidGuarantees 2024-11-23T15:25:52,031 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=106, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=106, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-23T15:25:52,031 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=106, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=106, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-23T15:25:52,031 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=107, ppid=106, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-23T15:25:52,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-11-23T15:25:52,036 DEBUG [hconnection-0x3bb60cc6-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T15:25:52,037 DEBUG [hconnection-0x209851c9-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T15:25:52,038 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51354, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T15:25:52,038 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51368, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T15:25:52,051 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:52,051 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:52,051 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:52,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51328 deadline: 1732375612051, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:52,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51354 deadline: 1732375612051, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:52,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51348 deadline: 1732375612051, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:52,057 DEBUG [hconnection-0x2637abfd-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T15:25:52,058 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51378, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T15:25:52,058 DEBUG [hconnection-0x66e1a4e2-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T15:25:52,059 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51380, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T15:25:52,060 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-23T15:25:52,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 2 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51378 deadline: 1732375612060, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985
2024-11-23T15:25:52,067 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411233f1893c25979425bbb9eb1bcec87cf52_1b51fcb9c5cc43364334a31573ca489e is 50, key is test_row_0/A:col10/1732375552026/Put/seqid=0
2024-11-23T15:25:52,068 DEBUG [hconnection-0x512dbd84-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false
2024-11-23T15:25:52,069 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51384, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService
2024-11-23T15:25:52,076 DEBUG [hconnection-0x775a5337-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false
2024-11-23T15:25:52,077 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51386, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService
2024-11-23T15:25:52,079 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-23T15:25:52,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 2 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51386 deadline: 1732375612079, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985
2024-11-23T15:25:52,090 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742132_1308 (size=12154)
2024-11-23T15:25:52,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106
2024-11-23T15:25:52,154 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-23T15:25:52,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51328 deadline: 1732375612152, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985
2024-11-23T15:25:52,154 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-23T15:25:52,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51354 deadline: 1732375612152, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985
2024-11-23T15:25:52,154 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-23T15:25:52,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51348 deadline: 1732375612152, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985
2024-11-23T15:25:52,164 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-23T15:25:52,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 4 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51378 deadline: 1732375612161, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985
2024-11-23T15:25:52,182 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-23T15:25:52,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 4 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51386 deadline: 1732375612180, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985
2024-11-23T15:25:52,183 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985
2024-11-23T15:25:52,184 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107
2024-11-23T15:25:52,184 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.
2024-11-23T15:25:52,184 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e. as already flushing
2024-11-23T15:25:52,184 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.
2024-11-23T15:25:52,184 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] handler.RSProcedureHandler(58): pid=107
java.io.IOException: Unable to complete flush {ENCODED => 1b51fcb9c5cc43364334a31573ca489e, NAME => 'TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-23T15:25:52,184 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=107
java.io.IOException: Unable to complete flush {ENCODED => 1b51fcb9c5cc43364334a31573ca489e, NAME => 'TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-23T15:25:52,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=107
org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1b51fcb9c5cc43364334a31573ca489e, NAME => 'TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?]
    at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?]
    at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
    at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1b51fcb9c5cc43364334a31573ca489e, NAME => 'TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-23T15:25:52,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106
2024-11-23T15:25:52,336 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985
2024-11-23T15:25:52,337 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107
2024-11-23T15:25:52,337 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.
2024-11-23T15:25:52,337 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e. as already flushing
2024-11-23T15:25:52,338 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.
2024-11-23T15:25:52,338 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] handler.RSProcedureHandler(58): pid=107
java.io.IOException: Unable to complete flush {ENCODED => 1b51fcb9c5cc43364334a31573ca489e, NAME => 'TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-23T15:25:52,338 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=107
java.io.IOException: Unable to complete flush {ENCODED => 1b51fcb9c5cc43364334a31573ca489e, NAME => 'TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-23T15:25:52,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=107
org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1b51fcb9c5cc43364334a31573ca489e, NAME => 'TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?]
    at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?]
    at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
    at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1b51fcb9c5cc43364334a31573ca489e, NAME => 'TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-23T15:25:52,357 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-23T15:25:52,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51328 deadline: 1732375612355, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985
2024-11-23T15:25:52,358 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-23T15:25:52,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51354 deadline: 1732375612356, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985
2024-11-23T15:25:52,358 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-23T15:25:52,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51348 deadline: 1732375612356, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985
2024-11-23T15:25:52,368 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-23T15:25:52,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51378 deadline: 1732375612366, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985
2024-11-23T15:25:52,385 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-23T15:25:52,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51386 deadline: 1732375612383, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985
2024-11-23T15:25:52,490 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985
2024-11-23T15:25:52,490 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107
2024-11-23T15:25:52,490 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.
2024-11-23T15:25:52,490 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e. as already flushing
2024-11-23T15:25:52,490 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.
2024-11-23T15:25:52,490 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] handler.RSProcedureHandler(58): pid=107
java.io.IOException: Unable to complete flush {ENCODED => 1b51fcb9c5cc43364334a31573ca489e, NAME => 'TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-23T15:25:52,491 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=107
java.io.IOException: Unable to complete flush {ENCODED => 1b51fcb9c5cc43364334a31573ca489e, NAME => 'TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-23T15:25:52,491 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-23T15:25:52,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=107
org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1b51fcb9c5cc43364334a31573ca489e, NAME => 'TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?]
    at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?]
    at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
    at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1b51fcb9c5cc43364334a31573ca489e, NAME => 'TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-23T15:25:52,495 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411233f1893c25979425bbb9eb1bcec87cf52_1b51fcb9c5cc43364334a31573ca489e to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411233f1893c25979425bbb9eb1bcec87cf52_1b51fcb9c5cc43364334a31573ca489e
2024-11-23T15:25:52,496 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/A/350d3a12f9664df8b98aadd325d08058, store: [table=TestAcidGuarantees family=A region=1b51fcb9c5cc43364334a31573ca489e]
2024-11-23T15:25:52,497 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/A/350d3a12f9664df8b98aadd325d08058 is 175, key is test_row_0/A:col10/1732375552026/Put/seqid=0
2024-11-23T15:25:52,501 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742133_1309 (size=30955)
2024-11-23T15:25:52,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106
2024-11-23T15:25:52,642 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985
2024-11-23T15:25:52,643 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107
2024-11-23T15:25:52,643 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.
2024-11-23T15:25:52,643 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e. as already flushing
2024-11-23T15:25:52,643 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.
2024-11-23T15:25:52,643 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] handler.RSProcedureHandler(58): pid=107
java.io.IOException: Unable to complete flush {ENCODED => 1b51fcb9c5cc43364334a31573ca489e, NAME => 'TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-23T15:25:52,644 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=107
java.io.IOException: Unable to complete flush {ENCODED => 1b51fcb9c5cc43364334a31573ca489e, NAME => 'TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-23T15:25:52,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=107
org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1b51fcb9c5cc43364334a31573ca489e, NAME => 'TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?]
    at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?]
    at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
    at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1b51fcb9c5cc43364334a31573ca489e, NAME => 'TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-23T15:25:52,661 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-23T15:25:52,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51354 deadline: 1732375612659, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985
2024-11-23T15:25:52,663 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-23T15:25:52,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51328 deadline: 1732375612660, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985
2024-11-23T15:25:52,663 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-23T15:25:52,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51348 deadline: 1732375612661, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985
2024-11-23T15:25:52,673 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-23T15:25:52,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51378 deadline: 1732375612671, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985
2024-11-23T15:25:52,689 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-23T15:25:52,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51386 deadline: 1732375612688, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985
2024-11-23T15:25:52,795 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985
2024-11-23T15:25:52,796 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107
2024-11-23T15:25:52,796 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.
2024-11-23T15:25:52,796 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e. as already flushing
2024-11-23T15:25:52,796 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.
2024-11-23T15:25:52,796 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] handler.RSProcedureHandler(58): pid=107
java.io.IOException: Unable to complete flush {ENCODED => 1b51fcb9c5cc43364334a31573ca489e, NAME => 'TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-23T15:25:52,796 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=107
java.io.IOException: Unable to complete flush {ENCODED => 1b51fcb9c5cc43364334a31573ca489e, NAME => 'TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-23T15:25:52,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=107
org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1b51fcb9c5cc43364334a31573ca489e, NAME => 'TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?]
    at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?]
    at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
    at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1b51fcb9c5cc43364334a31573ca489e, NAME => 'TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:25:52,901 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=16, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/A/350d3a12f9664df8b98aadd325d08058 2024-11-23T15:25:52,928 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/B/7c3c3bc78bbb4a9c9403df36fb61bcc1 is 50, key is test_row_0/B:col10/1732375552026/Put/seqid=0 2024-11-23T15:25:52,932 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742134_1310 (size=12001) 2024-11-23T15:25:52,948 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:25:52,949 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-11-23T15:25:52,949 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e. 2024-11-23T15:25:52,949 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e. 
as already flushing 2024-11-23T15:25:52,949 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e. 2024-11-23T15:25:52,949 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] handler.RSProcedureHandler(58): pid=107 java.io.IOException: Unable to complete flush {ENCODED => 1b51fcb9c5cc43364334a31573ca489e, NAME => 'TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:25:52,949 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=107 java.io.IOException: Unable to complete flush {ENCODED => 1b51fcb9c5cc43364334a31573ca489e, NAME => 'TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:25:52,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=107 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1b51fcb9c5cc43364334a31573ca489e, NAME => 'TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1b51fcb9c5cc43364334a31573ca489e, NAME => 'TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:25:53,101 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:25:53,102 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-11-23T15:25:53,102 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e. 2024-11-23T15:25:53,102 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e. as already flushing 2024-11-23T15:25:53,102 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e. 2024-11-23T15:25:53,102 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] handler.RSProcedureHandler(58): pid=107 java.io.IOException: Unable to complete flush {ENCODED => 1b51fcb9c5cc43364334a31573ca489e, NAME => 'TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:25:53,102 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=107 java.io.IOException: Unable to complete flush {ENCODED => 1b51fcb9c5cc43364334a31573ca489e, NAME => 'TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:25:53,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=107 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1b51fcb9c5cc43364334a31573ca489e, NAME => 'TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1b51fcb9c5cc43364334a31573ca489e, NAME => 'TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:25:53,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-11-23T15:25:53,165 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:53,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51354 deadline: 1732375613163, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:53,168 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:53,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51348 deadline: 1732375613166, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:53,169 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:53,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51328 deadline: 1732375613168, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:53,176 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:53,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51378 deadline: 1732375613174, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:53,193 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:53,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51386 deadline: 1732375613192, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:53,254 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:25:53,254 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-11-23T15:25:53,254 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e. 2024-11-23T15:25:53,254 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e. as already flushing 2024-11-23T15:25:53,254 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e. 2024-11-23T15:25:53,254 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] handler.RSProcedureHandler(58): pid=107 java.io.IOException: Unable to complete flush {ENCODED => 1b51fcb9c5cc43364334a31573ca489e, NAME => 'TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:25:53,255 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=107 java.io.IOException: Unable to complete flush {ENCODED => 1b51fcb9c5cc43364334a31573ca489e, NAME => 'TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:25:53,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=107 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1b51fcb9c5cc43364334a31573ca489e, NAME => 'TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1b51fcb9c5cc43364334a31573ca489e, NAME => 'TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:25:53,333 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=16 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/B/7c3c3bc78bbb4a9c9403df36fb61bcc1 2024-11-23T15:25:53,358 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/C/79cc7be3c63c4d349530918c261ddf95 is 50, key is test_row_0/C:col10/1732375552026/Put/seqid=0 2024-11-23T15:25:53,364 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742135_1311 (size=12001) 2024-11-23T15:25:53,406 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:25:53,407 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-11-23T15:25:53,407 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e. 2024-11-23T15:25:53,407 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e. 
as already flushing 2024-11-23T15:25:53,407 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e. 2024-11-23T15:25:53,407 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] handler.RSProcedureHandler(58): pid=107 java.io.IOException: Unable to complete flush {ENCODED => 1b51fcb9c5cc43364334a31573ca489e, NAME => 'TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:25:53,407 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=107 java.io.IOException: Unable to complete flush {ENCODED => 1b51fcb9c5cc43364334a31573ca489e, NAME => 'TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:25:53,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=107 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1b51fcb9c5cc43364334a31573ca489e, NAME => 'TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1b51fcb9c5cc43364334a31573ca489e, NAME => 'TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:25:53,559 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:25:53,559 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-11-23T15:25:53,560 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e. 2024-11-23T15:25:53,560 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e. as already flushing 2024-11-23T15:25:53,560 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e. 2024-11-23T15:25:53,560 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] handler.RSProcedureHandler(58): pid=107 java.io.IOException: Unable to complete flush {ENCODED => 1b51fcb9c5cc43364334a31573ca489e, NAME => 'TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:25:53,560 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=107 java.io.IOException: Unable to complete flush {ENCODED => 1b51fcb9c5cc43364334a31573ca489e, NAME => 'TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:25:53,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=107 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1b51fcb9c5cc43364334a31573ca489e, NAME => 'TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1b51fcb9c5cc43364334a31573ca489e, NAME => 'TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:25:53,712 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:25:53,712 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-11-23T15:25:53,712 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e. 2024-11-23T15:25:53,712 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e. as already flushing 2024-11-23T15:25:53,713 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e. 2024-11-23T15:25:53,713 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] handler.RSProcedureHandler(58): pid=107 java.io.IOException: Unable to complete flush {ENCODED => 1b51fcb9c5cc43364334a31573ca489e, NAME => 'TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:25:53,713 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=107 java.io.IOException: Unable to complete flush {ENCODED => 1b51fcb9c5cc43364334a31573ca489e, NAME => 'TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:25:53,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=107 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1b51fcb9c5cc43364334a31573ca489e, NAME => 'TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1b51fcb9c5cc43364334a31573ca489e, NAME => 'TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:25:53,744 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-23T15:25:53,764 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=16 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/C/79cc7be3c63c4d349530918c261ddf95 2024-11-23T15:25:53,769 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/A/350d3a12f9664df8b98aadd325d08058 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/A/350d3a12f9664df8b98aadd325d08058 2024-11-23T15:25:53,772 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/A/350d3a12f9664df8b98aadd325d08058, entries=150, sequenceid=16, filesize=30.2 K 2024-11-23T15:25:53,773 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/B/7c3c3bc78bbb4a9c9403df36fb61bcc1 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/B/7c3c3bc78bbb4a9c9403df36fb61bcc1 2024-11-23T15:25:53,777 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/B/7c3c3bc78bbb4a9c9403df36fb61bcc1, entries=150, sequenceid=16, filesize=11.7 K 2024-11-23T15:25:53,777 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/C/79cc7be3c63c4d349530918c261ddf95 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/C/79cc7be3c63c4d349530918c261ddf95 2024-11-23T15:25:53,781 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/C/79cc7be3c63c4d349530918c261ddf95, entries=150, sequenceid=16, filesize=11.7 K 2024-11-23T15:25:53,782 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=147.60 KB/151140 for 1b51fcb9c5cc43364334a31573ca489e in 1754ms, sequenceid=16, compaction requested=false 2024-11-23T15:25:53,782 DEBUG [MemStoreFlusher.0 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestAcidGuarantees' 2024-11-23T15:25:53,782 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 1b51fcb9c5cc43364334a31573ca489e: 2024-11-23T15:25:53,864 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:25:53,865 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-11-23T15:25:53,865 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e. 
2024-11-23T15:25:53,865 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2837): Flushing 1b51fcb9c5cc43364334a31573ca489e 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-23T15:25:53,865 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1b51fcb9c5cc43364334a31573ca489e, store=A 2024-11-23T15:25:53,865 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:25:53,865 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1b51fcb9c5cc43364334a31573ca489e, store=B 2024-11-23T15:25:53,865 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:25:53,865 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1b51fcb9c5cc43364334a31573ca489e, store=C 2024-11-23T15:25:53,866 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:25:53,872 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112386ea5970eb024ce4b80209ae2476cfa0_1b51fcb9c5cc43364334a31573ca489e is 50, key is test_row_0/A:col10/1732375552049/Put/seqid=0 2024-11-23T15:25:53,876 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742136_1312 (size=12154) 2024-11-23T15:25:53,877 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:53,881 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112386ea5970eb024ce4b80209ae2476cfa0_1b51fcb9c5cc43364334a31573ca489e to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112386ea5970eb024ce4b80209ae2476cfa0_1b51fcb9c5cc43364334a31573ca489e 2024-11-23T15:25:53,882 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/A/478d8eb427a543a4aa2209198924f38a, store: [table=TestAcidGuarantees family=A region=1b51fcb9c5cc43364334a31573ca489e] 2024-11-23T15:25:53,883 
DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/A/478d8eb427a543a4aa2209198924f38a is 175, key is test_row_0/A:col10/1732375552049/Put/seqid=0 2024-11-23T15:25:53,886 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742137_1313 (size=30955) 2024-11-23T15:25:54,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-11-23T15:25:54,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(8581): Flush requested on 1b51fcb9c5cc43364334a31573ca489e 2024-11-23T15:25:54,177 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e. as already flushing 2024-11-23T15:25:54,187 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:54,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51328 deadline: 1732375614182, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:54,187 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:54,187 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:54,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51348 deadline: 1732375614183, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:54,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51354 deadline: 1732375614183, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:54,188 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:54,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51378 deadline: 1732375614184, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:54,202 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:54,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51386 deadline: 1732375614200, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:54,287 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=41, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/A/478d8eb427a543a4aa2209198924f38a 2024-11-23T15:25:54,292 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:54,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51328 deadline: 1732375614288, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:54,292 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:54,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51354 deadline: 1732375614288, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:54,292 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:54,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51348 deadline: 1732375614289, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:54,292 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:54,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51378 deadline: 1732375614289, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:54,294 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/B/a52c1784f752479db240332a636cc0e6 is 50, key is test_row_0/B:col10/1732375552049/Put/seqid=0 2024-11-23T15:25:54,298 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742138_1314 (size=12001) 2024-11-23T15:25:54,298 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=41 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/B/a52c1784f752479db240332a636cc0e6 2024-11-23T15:25:54,306 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/C/6359688c365344f784cba62571d73c03 is 50, key is test_row_0/C:col10/1732375552049/Put/seqid=0 2024-11-23T15:25:54,309 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742139_1315 (size=12001) 2024-11-23T15:25:54,494 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:54,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51378 deadline: 1732375614493, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:54,494 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:54,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51328 deadline: 1732375614494, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:54,494 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:54,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51354 deadline: 1732375614494, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:54,497 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:54,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51348 deadline: 1732375614494, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:54,711 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=41 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/C/6359688c365344f784cba62571d73c03 2024-11-23T15:25:54,717 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/A/478d8eb427a543a4aa2209198924f38a as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/A/478d8eb427a543a4aa2209198924f38a 2024-11-23T15:25:54,721 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/A/478d8eb427a543a4aa2209198924f38a, entries=150, sequenceid=41, filesize=30.2 K 2024-11-23T15:25:54,721 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/B/a52c1784f752479db240332a636cc0e6 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/B/a52c1784f752479db240332a636cc0e6 2024-11-23T15:25:54,725 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/B/a52c1784f752479db240332a636cc0e6, entries=150, sequenceid=41, filesize=11.7 K 2024-11-23T15:25:54,725 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/C/6359688c365344f784cba62571d73c03 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/C/6359688c365344f784cba62571d73c03 2024-11-23T15:25:54,729 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/C/6359688c365344f784cba62571d73c03, entries=150, sequenceid=41, filesize=11.7 K 2024-11-23T15:25:54,729 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 1b51fcb9c5cc43364334a31573ca489e in 864ms, sequenceid=41, compaction requested=false 2024-11-23T15:25:54,729 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2538): Flush status journal for 1b51fcb9c5cc43364334a31573ca489e: 2024-11-23T15:25:54,729 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e. 2024-11-23T15:25:54,730 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=107 2024-11-23T15:25:54,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4106): Remote procedure done, pid=107 2024-11-23T15:25:54,731 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=107, resume processing ppid=106 2024-11-23T15:25:54,732 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=107, ppid=106, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.6990 sec 2024-11-23T15:25:54,733 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=106, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=106, table=TestAcidGuarantees in 2.7020 sec 2024-11-23T15:25:54,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(8581): Flush requested on 1b51fcb9c5cc43364334a31573ca489e 2024-11-23T15:25:54,798 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 1b51fcb9c5cc43364334a31573ca489e 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-23T15:25:54,799 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1b51fcb9c5cc43364334a31573ca489e, store=A 2024-11-23T15:25:54,800 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:25:54,800 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1b51fcb9c5cc43364334a31573ca489e, store=B 2024-11-23T15:25:54,800 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:25:54,800 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
1b51fcb9c5cc43364334a31573ca489e, store=C 2024-11-23T15:25:54,800 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:25:54,816 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411232b9c2f5d071d420c8eb892d674e0d95d_1b51fcb9c5cc43364334a31573ca489e is 50, key is test_row_0/A:col10/1732375554798/Put/seqid=0 2024-11-23T15:25:54,820 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742140_1316 (size=14594) 2024-11-23T15:25:54,821 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:54,825 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411232b9c2f5d071d420c8eb892d674e0d95d_1b51fcb9c5cc43364334a31573ca489e to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411232b9c2f5d071d420c8eb892d674e0d95d_1b51fcb9c5cc43364334a31573ca489e 2024-11-23T15:25:54,826 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/A/c914442502fe43d79f90a79dd2c9daea, store: [table=TestAcidGuarantees family=A region=1b51fcb9c5cc43364334a31573ca489e] 2024-11-23T15:25:54,826 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/A/c914442502fe43d79f90a79dd2c9daea is 175, key is test_row_0/A:col10/1732375554798/Put/seqid=0 2024-11-23T15:25:54,830 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742141_1317 (size=39549) 2024-11-23T15:25:54,832 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:54,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51354 deadline: 1732375614825, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:54,832 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:54,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51378 deadline: 1732375614826, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:54,836 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:54,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51328 deadline: 1732375614830, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:54,837 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:54,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51348 deadline: 1732375614832, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:54,938 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:54,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51354 deadline: 1732375614933, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:54,939 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:54,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51378 deadline: 1732375614933, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:54,943 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:54,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51328 deadline: 1732375614937, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:54,943 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:54,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51348 deadline: 1732375614938, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:55,141 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:55,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51378 deadline: 1732375615140, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:55,142 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:55,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51354 deadline: 1732375615140, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:55,148 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:55,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51328 deadline: 1732375615145, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:55,149 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:55,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51348 deadline: 1732375615145, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:55,231 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=54, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/A/c914442502fe43d79f90a79dd2c9daea 2024-11-23T15:25:55,237 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/B/c8a3f7dc9b9243f6ae549962e43f4870 is 50, key is test_row_0/B:col10/1732375554798/Put/seqid=0 2024-11-23T15:25:55,242 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742142_1318 (size=12001) 2024-11-23T15:25:55,446 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:55,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51378 deadline: 1732375615444, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:55,446 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:55,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51354 deadline: 1732375615445, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:55,450 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:55,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51328 deadline: 1732375615449, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:55,455 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:55,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51348 deadline: 1732375615452, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:55,643 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=54 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/B/c8a3f7dc9b9243f6ae549962e43f4870 2024-11-23T15:25:55,650 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/C/7db99c6601f0484f8be3f800761f92e3 is 50, key is test_row_0/C:col10/1732375554798/Put/seqid=0 2024-11-23T15:25:55,654 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742143_1319 (size=12001) 2024-11-23T15:25:55,949 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:55,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51378 deadline: 1732375615947, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:55,954 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:55,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51354 deadline: 1732375615951, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:55,954 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:55,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51328 deadline: 1732375615951, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:55,960 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:55,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51348 deadline: 1732375615958, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:56,055 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=54 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/C/7db99c6601f0484f8be3f800761f92e3 2024-11-23T15:25:56,059 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/A/c914442502fe43d79f90a79dd2c9daea as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/A/c914442502fe43d79f90a79dd2c9daea 2024-11-23T15:25:56,063 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/A/c914442502fe43d79f90a79dd2c9daea, entries=200, sequenceid=54, filesize=38.6 K 2024-11-23T15:25:56,063 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/B/c8a3f7dc9b9243f6ae549962e43f4870 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/B/c8a3f7dc9b9243f6ae549962e43f4870 2024-11-23T15:25:56,067 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/B/c8a3f7dc9b9243f6ae549962e43f4870, entries=150, sequenceid=54, filesize=11.7 K 2024-11-23T15:25:56,068 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/C/7db99c6601f0484f8be3f800761f92e3 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/C/7db99c6601f0484f8be3f800761f92e3 2024-11-23T15:25:56,071 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/C/7db99c6601f0484f8be3f800761f92e3, entries=150, sequenceid=54, filesize=11.7 K 2024-11-23T15:25:56,072 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 1b51fcb9c5cc43364334a31573ca489e in 1274ms, sequenceid=54, compaction requested=true 2024-11-23T15:25:56,072 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 1b51fcb9c5cc43364334a31573ca489e: 2024-11-23T15:25:56,072 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1b51fcb9c5cc43364334a31573ca489e:A, priority=-2147483648, current under compaction store size is 1 2024-11-23T15:25:56,072 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T15:25:56,072 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1b51fcb9c5cc43364334a31573ca489e:B, priority=-2147483648, current under compaction store size is 2 2024-11-23T15:25:56,072 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T15:25:56,072 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1b51fcb9c5cc43364334a31573ca489e:C, priority=-2147483648, current under compaction store size is 3 2024-11-23T15:25:56,072 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T15:25:56,072 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T15:25:56,072 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T15:25:56,073 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 101459 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T15:25:56,073 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T15:25:56,073 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1540): 1b51fcb9c5cc43364334a31573ca489e/B is initiating minor compaction (all files) 2024-11-23T15:25:56,073 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HStore(1540): 1b51fcb9c5cc43364334a31573ca489e/A is initiating minor compaction (all files) 2024-11-23T15:25:56,073 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1b51fcb9c5cc43364334a31573ca489e/B in TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e. 
2024-11-23T15:25:56,073 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1b51fcb9c5cc43364334a31573ca489e/A in TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e. 2024-11-23T15:25:56,073 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/A/350d3a12f9664df8b98aadd325d08058, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/A/478d8eb427a543a4aa2209198924f38a, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/A/c914442502fe43d79f90a79dd2c9daea] into tmpdir=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp, totalSize=99.1 K 2024-11-23T15:25:56,073 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/B/7c3c3bc78bbb4a9c9403df36fb61bcc1, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/B/a52c1784f752479db240332a636cc0e6, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/B/c8a3f7dc9b9243f6ae549962e43f4870] into tmpdir=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp, totalSize=35.2 K 2024-11-23T15:25:56,073 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e. 2024-11-23T15:25:56,073 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e. 
files: [hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/A/350d3a12f9664df8b98aadd325d08058, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/A/478d8eb427a543a4aa2209198924f38a, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/A/c914442502fe43d79f90a79dd2c9daea] 2024-11-23T15:25:56,074 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting 7c3c3bc78bbb4a9c9403df36fb61bcc1, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=16, earliestPutTs=1732375552026 2024-11-23T15:25:56,074 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 350d3a12f9664df8b98aadd325d08058, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=16, earliestPutTs=1732375552026 2024-11-23T15:25:56,074 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting a52c1784f752479db240332a636cc0e6, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1732375552038 2024-11-23T15:25:56,074 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 478d8eb427a543a4aa2209198924f38a, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1732375552038 2024-11-23T15:25:56,074 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting c8a3f7dc9b9243f6ae549962e43f4870, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1732375554182 2024-11-23T15:25:56,074 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.Compactor(224): Compacting c914442502fe43d79f90a79dd2c9daea, keycount=200, bloomtype=ROW, size=38.6 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1732375554182 2024-11-23T15:25:56,086 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=1b51fcb9c5cc43364334a31573ca489e] 2024-11-23T15:25:56,087 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1b51fcb9c5cc43364334a31573ca489e#B#compaction#273 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-23T15:25:56,088 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/B/40e3b21e7f534a5db6e6272a1cde58d5 is 50, key is test_row_0/B:col10/1732375554798/Put/seqid=0 2024-11-23T15:25:56,092 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024112309779edd92474b4d88f13b3322eba65a_1b51fcb9c5cc43364334a31573ca489e store=[table=TestAcidGuarantees family=A region=1b51fcb9c5cc43364334a31573ca489e] 2024-11-23T15:25:56,095 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024112309779edd92474b4d88f13b3322eba65a_1b51fcb9c5cc43364334a31573ca489e, store=[table=TestAcidGuarantees family=A region=1b51fcb9c5cc43364334a31573ca489e] 2024-11-23T15:25:56,095 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112309779edd92474b4d88f13b3322eba65a_1b51fcb9c5cc43364334a31573ca489e because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=1b51fcb9c5cc43364334a31573ca489e] 2024-11-23T15:25:56,097 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742144_1320 (size=12104) 2024-11-23T15:25:56,102 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742145_1321 (size=4469) 2024-11-23T15:25:56,104 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/B/40e3b21e7f534a5db6e6272a1cde58d5 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/B/40e3b21e7f534a5db6e6272a1cde58d5 2024-11-23T15:25:56,108 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 1b51fcb9c5cc43364334a31573ca489e/B of 1b51fcb9c5cc43364334a31573ca489e into 40e3b21e7f534a5db6e6272a1cde58d5(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T15:25:56,108 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1b51fcb9c5cc43364334a31573ca489e: 2024-11-23T15:25:56,108 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e., storeName=1b51fcb9c5cc43364334a31573ca489e/B, priority=13, startTime=1732375556072; duration=0sec 2024-11-23T15:25:56,109 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T15:25:56,109 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1b51fcb9c5cc43364334a31573ca489e:B 2024-11-23T15:25:56,109 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T15:25:56,109 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T15:25:56,110 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1540): 1b51fcb9c5cc43364334a31573ca489e/C is initiating minor compaction (all files) 2024-11-23T15:25:56,110 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1b51fcb9c5cc43364334a31573ca489e/C in TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e. 2024-11-23T15:25:56,110 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/C/79cc7be3c63c4d349530918c261ddf95, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/C/6359688c365344f784cba62571d73c03, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/C/7db99c6601f0484f8be3f800761f92e3] into tmpdir=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp, totalSize=35.2 K 2024-11-23T15:25:56,110 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting 79cc7be3c63c4d349530918c261ddf95, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=16, earliestPutTs=1732375552026 2024-11-23T15:25:56,110 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting 6359688c365344f784cba62571d73c03, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1732375552038 2024-11-23T15:25:56,111 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting 7db99c6601f0484f8be3f800761f92e3, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1732375554182 2024-11-23T15:25:56,117 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
1b51fcb9c5cc43364334a31573ca489e#C#compaction#274 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-23T15:25:56,118 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/C/d4440cad78604a5d83a1630917b68d4b is 50, key is test_row_0/C:col10/1732375554798/Put/seqid=0 2024-11-23T15:25:56,121 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742146_1322 (size=12104) 2024-11-23T15:25:56,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-11-23T15:25:56,136 INFO [Thread-1416 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 106 completed 2024-11-23T15:25:56,137 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-23T15:25:56,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] procedure2.ProcedureExecutor(1098): Stored pid=108, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=108, table=TestAcidGuarantees 2024-11-23T15:25:56,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-11-23T15:25:56,139 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=108, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=108, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-23T15:25:56,139 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=108, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=108, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-23T15:25:56,139 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=109, ppid=108, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-23T15:25:56,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(8581): Flush requested on 1b51fcb9c5cc43364334a31573ca489e 2024-11-23T15:25:56,217 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 1b51fcb9c5cc43364334a31573ca489e 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-23T15:25:56,218 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1b51fcb9c5cc43364334a31573ca489e, store=A 2024-11-23T15:25:56,218 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:25:56,218 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1b51fcb9c5cc43364334a31573ca489e, store=B 2024-11-23T15:25:56,218 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:25:56,218 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1b51fcb9c5cc43364334a31573ca489e, store=C 
2024-11-23T15:25:56,218 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:25:56,225 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112308214c0e253d44239b787441be22f446_1b51fcb9c5cc43364334a31573ca489e is 50, key is test_row_0/A:col10/1732375554831/Put/seqid=0 2024-11-23T15:25:56,231 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742147_1323 (size=14594) 2024-11-23T15:25:56,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-11-23T15:25:56,270 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:56,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51386 deadline: 1732375616261, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:56,290 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:25:56,291 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-11-23T15:25:56,291 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e. 
2024-11-23T15:25:56,291 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e. as already flushing 2024-11-23T15:25:56,291 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e. 2024-11-23T15:25:56,291 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] handler.RSProcedureHandler(58): pid=109 java.io.IOException: Unable to complete flush {ENCODED => 1b51fcb9c5cc43364334a31573ca489e, NAME => 'TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:25:56,291 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=109 java.io.IOException: Unable to complete flush {ENCODED => 1b51fcb9c5cc43364334a31573ca489e, NAME => 'TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T15:25:56,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=109 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1b51fcb9c5cc43364334a31573ca489e, NAME => 'TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1b51fcb9c5cc43364334a31573ca489e, NAME => 'TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:25:56,373 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:56,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51386 deadline: 1732375616371, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:56,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-11-23T15:25:56,443 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:25:56,444 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-11-23T15:25:56,444 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e. 2024-11-23T15:25:56,444 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e. as already flushing 2024-11-23T15:25:56,444 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e. 2024-11-23T15:25:56,444 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] handler.RSProcedureHandler(58): pid=109 java.io.IOException: Unable to complete flush {ENCODED => 1b51fcb9c5cc43364334a31573ca489e, NAME => 'TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T15:25:56,444 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=109 java.io.IOException: Unable to complete flush {ENCODED => 1b51fcb9c5cc43364334a31573ca489e, NAME => 'TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:25:56,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=109 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1b51fcb9c5cc43364334a31573ca489e, NAME => 'TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1b51fcb9c5cc43364334a31573ca489e, NAME => 'TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:25:56,504 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1b51fcb9c5cc43364334a31573ca489e#A#compaction#272 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T15:25:56,505 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/A/20973a57493249a193e17fa523f249e9 is 175, key is test_row_0/A:col10/1732375554798/Put/seqid=0 2024-11-23T15:25:56,509 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742148_1324 (size=31058) 2024-11-23T15:25:56,525 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/C/d4440cad78604a5d83a1630917b68d4b as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/C/d4440cad78604a5d83a1630917b68d4b 2024-11-23T15:25:56,530 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 1b51fcb9c5cc43364334a31573ca489e/C of 1b51fcb9c5cc43364334a31573ca489e into d4440cad78604a5d83a1630917b68d4b(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T15:25:56,530 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1b51fcb9c5cc43364334a31573ca489e: 2024-11-23T15:25:56,530 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e., storeName=1b51fcb9c5cc43364334a31573ca489e/C, priority=13, startTime=1732375556072; duration=0sec 2024-11-23T15:25:56,530 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T15:25:56,530 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1b51fcb9c5cc43364334a31573ca489e:C 2024-11-23T15:25:56,577 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:56,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51386 deadline: 1732375616574, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:56,596 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:25:56,597 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-11-23T15:25:56,597 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e. 2024-11-23T15:25:56,597 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e. as already flushing 2024-11-23T15:25:56,597 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e. 2024-11-23T15:25:56,597 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] handler.RSProcedureHandler(58): pid=109 java.io.IOException: Unable to complete flush {ENCODED => 1b51fcb9c5cc43364334a31573ca489e, NAME => 'TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:25:56,597 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=109 java.io.IOException: Unable to complete flush {ENCODED => 1b51fcb9c5cc43364334a31573ca489e, NAME => 'TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:25:56,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=109 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1b51fcb9c5cc43364334a31573ca489e, NAME => 'TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1b51fcb9c5cc43364334a31573ca489e, NAME => 'TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:25:56,630 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:56,634 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112308214c0e253d44239b787441be22f446_1b51fcb9c5cc43364334a31573ca489e to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112308214c0e253d44239b787441be22f446_1b51fcb9c5cc43364334a31573ca489e 2024-11-23T15:25:56,635 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/A/a6003b208ab94f5c82af3cc87eee28d4, store: [table=TestAcidGuarantees family=A region=1b51fcb9c5cc43364334a31573ca489e] 2024-11-23T15:25:56,636 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/A/a6003b208ab94f5c82af3cc87eee28d4 is 175, key is test_row_0/A:col10/1732375554831/Put/seqid=0 2024-11-23T15:25:56,640 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742149_1325 (size=39549) 2024-11-23T15:25:56,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-11-23T15:25:56,749 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:25:56,750 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-11-23T15:25:56,750 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e. 2024-11-23T15:25:56,750 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e. 
as already flushing 2024-11-23T15:25:56,750 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e. 2024-11-23T15:25:56,750 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] handler.RSProcedureHandler(58): pid=109 java.io.IOException: Unable to complete flush {ENCODED => 1b51fcb9c5cc43364334a31573ca489e, NAME => 'TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:25:56,750 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=109 java.io.IOException: Unable to complete flush {ENCODED => 1b51fcb9c5cc43364334a31573ca489e, NAME => 'TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:25:56,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=109 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1b51fcb9c5cc43364334a31573ca489e, NAME => 'TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1b51fcb9c5cc43364334a31573ca489e, NAME => 'TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:25:56,883 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:56,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51386 deadline: 1732375616880, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:56,902 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:25:56,902 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-11-23T15:25:56,902 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e. 2024-11-23T15:25:56,902 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e. as already flushing 2024-11-23T15:25:56,903 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e. 2024-11-23T15:25:56,903 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] handler.RSProcedureHandler(58): pid=109 java.io.IOException: Unable to complete flush {ENCODED => 1b51fcb9c5cc43364334a31573ca489e, NAME => 'TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T15:25:56,903 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=109 java.io.IOException: Unable to complete flush {ENCODED => 1b51fcb9c5cc43364334a31573ca489e, NAME => 'TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:25:56,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=109 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1b51fcb9c5cc43364334a31573ca489e, NAME => 'TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1b51fcb9c5cc43364334a31573ca489e, NAME => 'TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:25:56,914 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/A/20973a57493249a193e17fa523f249e9 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/A/20973a57493249a193e17fa523f249e9 2024-11-23T15:25:56,918 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 1b51fcb9c5cc43364334a31573ca489e/A of 1b51fcb9c5cc43364334a31573ca489e into 20973a57493249a193e17fa523f249e9(size=30.3 K), total size for store is 30.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T15:25:56,918 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1b51fcb9c5cc43364334a31573ca489e: 2024-11-23T15:25:56,918 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e., storeName=1b51fcb9c5cc43364334a31573ca489e/A, priority=13, startTime=1732375556072; duration=0sec 2024-11-23T15:25:56,918 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T15:25:56,918 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1b51fcb9c5cc43364334a31573ca489e:A 2024-11-23T15:25:56,958 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:56,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51354 deadline: 1732375616956, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:56,958 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:56,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51378 deadline: 1732375616958, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:56,962 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:56,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51348 deadline: 1732375616961, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:56,967 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:56,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51328 deadline: 1732375616963, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:57,040 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=79, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/A/a6003b208ab94f5c82af3cc87eee28d4 2024-11-23T15:25:57,048 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/B/f4367b1ed36e4fd3828e2b25d90184e4 is 50, key is test_row_0/B:col10/1732375554831/Put/seqid=0 2024-11-23T15:25:57,054 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:25:57,055 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-11-23T15:25:57,055 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e. 2024-11-23T15:25:57,055 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e. as already flushing 2024-11-23T15:25:57,055 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e. 2024-11-23T15:25:57,055 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] handler.RSProcedureHandler(58): pid=109 java.io.IOException: Unable to complete flush {ENCODED => 1b51fcb9c5cc43364334a31573ca489e, NAME => 'TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:25:57,055 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=109 java.io.IOException: Unable to complete flush {ENCODED => 1b51fcb9c5cc43364334a31573ca489e, NAME => 'TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:25:57,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=109 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1b51fcb9c5cc43364334a31573ca489e, NAME => 'TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1b51fcb9c5cc43364334a31573ca489e, NAME => 'TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:25:57,058 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742150_1326 (size=12001) 2024-11-23T15:25:57,059 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=79 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/B/f4367b1ed36e4fd3828e2b25d90184e4 2024-11-23T15:25:57,065 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/C/00f10cb888a54e20a6ed651c29edf91d is 50, key is test_row_0/C:col10/1732375554831/Put/seqid=0 2024-11-23T15:25:57,068 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742151_1327 (size=12001) 2024-11-23T15:25:57,207 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:25:57,208 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-11-23T15:25:57,208 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e. 2024-11-23T15:25:57,208 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e. 
as already flushing 2024-11-23T15:25:57,208 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e. 2024-11-23T15:25:57,208 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] handler.RSProcedureHandler(58): pid=109 java.io.IOException: Unable to complete flush {ENCODED => 1b51fcb9c5cc43364334a31573ca489e, NAME => 'TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:25:57,208 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=109 java.io.IOException: Unable to complete flush {ENCODED => 1b51fcb9c5cc43364334a31573ca489e, NAME => 'TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:25:57,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=109 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1b51fcb9c5cc43364334a31573ca489e, NAME => 'TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1b51fcb9c5cc43364334a31573ca489e, NAME => 'TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:25:57,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-11-23T15:25:57,360 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:25:57,360 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-11-23T15:25:57,361 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e. 2024-11-23T15:25:57,361 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e. as already flushing 2024-11-23T15:25:57,361 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e. 2024-11-23T15:25:57,361 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] handler.RSProcedureHandler(58): pid=109 java.io.IOException: Unable to complete flush {ENCODED => 1b51fcb9c5cc43364334a31573ca489e, NAME => 'TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:25:57,361 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=109 java.io.IOException: Unable to complete flush {ENCODED => 1b51fcb9c5cc43364334a31573ca489e, NAME => 'TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:25:57,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=109 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1b51fcb9c5cc43364334a31573ca489e, NAME => 'TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1b51fcb9c5cc43364334a31573ca489e, NAME => 'TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:25:57,388 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:57,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51386 deadline: 1732375617386, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:57,469 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=79 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/C/00f10cb888a54e20a6ed651c29edf91d 2024-11-23T15:25:57,473 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/A/a6003b208ab94f5c82af3cc87eee28d4 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/A/a6003b208ab94f5c82af3cc87eee28d4 2024-11-23T15:25:57,477 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/A/a6003b208ab94f5c82af3cc87eee28d4, entries=200, sequenceid=79, filesize=38.6 K 2024-11-23T15:25:57,478 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/B/f4367b1ed36e4fd3828e2b25d90184e4 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/B/f4367b1ed36e4fd3828e2b25d90184e4 2024-11-23T15:25:57,481 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/B/f4367b1ed36e4fd3828e2b25d90184e4, entries=150, sequenceid=79, filesize=11.7 K 2024-11-23T15:25:57,482 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/C/00f10cb888a54e20a6ed651c29edf91d as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/C/00f10cb888a54e20a6ed651c29edf91d 2024-11-23T15:25:57,486 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/C/00f10cb888a54e20a6ed651c29edf91d, entries=150, sequenceid=79, filesize=11.7 K 2024-11-23T15:25:57,487 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for 1b51fcb9c5cc43364334a31573ca489e in 1270ms, sequenceid=79, compaction requested=false 2024-11-23T15:25:57,487 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 1b51fcb9c5cc43364334a31573ca489e: 2024-11-23T15:25:57,513 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:25:57,513 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-11-23T15:25:57,513 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e. 2024-11-23T15:25:57,514 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2837): Flushing 1b51fcb9c5cc43364334a31573ca489e 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-23T15:25:57,514 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1b51fcb9c5cc43364334a31573ca489e, store=A 2024-11-23T15:25:57,514 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:25:57,514 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1b51fcb9c5cc43364334a31573ca489e, store=B 2024-11-23T15:25:57,514 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:25:57,514 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1b51fcb9c5cc43364334a31573ca489e, store=C 2024-11-23T15:25:57,514 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:25:57,520 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241123b24717760159490b9ae81cdad8837cde_1b51fcb9c5cc43364334a31573ca489e is 50, key is test_row_0/A:col10/1732375556241/Put/seqid=0 2024-11-23T15:25:57,523 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742152_1328 (size=12154) 2024-11-23T15:25:57,924 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:25:57,928 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241123b24717760159490b9ae81cdad8837cde_1b51fcb9c5cc43364334a31573ca489e to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123b24717760159490b9ae81cdad8837cde_1b51fcb9c5cc43364334a31573ca489e 2024-11-23T15:25:57,929 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/A/eebeaf28e8e74499a98a3d64cb0e91a5, store: [table=TestAcidGuarantees family=A region=1b51fcb9c5cc43364334a31573ca489e] 2024-11-23T15:25:57,930 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/A/eebeaf28e8e74499a98a3d64cb0e91a5 is 175, key is test_row_0/A:col10/1732375556241/Put/seqid=0 2024-11-23T15:25:57,934 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742153_1329 (size=30955) 2024-11-23T15:25:58,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-11-23T15:25:58,335 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=93, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/A/eebeaf28e8e74499a98a3d64cb0e91a5 2024-11-23T15:25:58,342 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/B/0d998298572c420d91d7d8e05e2eb070 is 50, key is test_row_0/B:col10/1732375556241/Put/seqid=0 2024-11-23T15:25:58,346 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742154_1330 (size=12001) 2024-11-23T15:25:58,400 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e. 
as already flushing 2024-11-23T15:25:58,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(8581): Flush requested on 1b51fcb9c5cc43364334a31573ca489e 2024-11-23T15:25:58,485 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:58,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51386 deadline: 1732375618482, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:58,590 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:58,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51386 deadline: 1732375618586, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:58,747 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/B/0d998298572c420d91d7d8e05e2eb070 2024-11-23T15:25:58,753 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/C/35c84c22947b42bbb3ec8dd3ed41bda3 is 50, key is test_row_0/C:col10/1732375556241/Put/seqid=0 2024-11-23T15:25:58,757 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742155_1331 (size=12001) 2024-11-23T15:25:58,793 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:58,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51386 deadline: 1732375618792, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:58,972 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:58,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51348 deadline: 1732375618968, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:58,972 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:58,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51354 deadline: 1732375618969, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:58,973 DEBUG [Thread-1410 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4147 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e., hostname=6a36843bf905,33811,1732375456985, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-23T15:25:58,973 DEBUG [Thread-1408 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4140 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, 
regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e., hostname=6a36843bf905,33811,1732375456985, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) 
at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-23T15:25:58,979 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:58,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51328 deadline: 1732375618977, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:58,979 DEBUG [Thread-1414 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4149 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e., hostname=6a36843bf905,33811,1732375456985, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-23T15:25:58,981 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:58,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51378 deadline: 1732375618978, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:58,982 DEBUG [Thread-1412 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4156 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e., hostname=6a36843bf905,33811,1732375456985, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-23T15:25:59,096 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:59,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51386 deadline: 1732375619094, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:59,158 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/C/35c84c22947b42bbb3ec8dd3ed41bda3 2024-11-23T15:25:59,163 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/A/eebeaf28e8e74499a98a3d64cb0e91a5 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/A/eebeaf28e8e74499a98a3d64cb0e91a5 2024-11-23T15:25:59,167 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/A/eebeaf28e8e74499a98a3d64cb0e91a5, entries=150, sequenceid=93, filesize=30.2 K 2024-11-23T15:25:59,168 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/B/0d998298572c420d91d7d8e05e2eb070 as 
hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/B/0d998298572c420d91d7d8e05e2eb070 2024-11-23T15:25:59,172 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/B/0d998298572c420d91d7d8e05e2eb070, entries=150, sequenceid=93, filesize=11.7 K 2024-11-23T15:25:59,173 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/C/35c84c22947b42bbb3ec8dd3ed41bda3 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/C/35c84c22947b42bbb3ec8dd3ed41bda3 2024-11-23T15:25:59,177 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/C/35c84c22947b42bbb3ec8dd3ed41bda3, entries=150, sequenceid=93, filesize=11.7 K 2024-11-23T15:25:59,178 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 1b51fcb9c5cc43364334a31573ca489e in 1665ms, sequenceid=93, compaction requested=true 2024-11-23T15:25:59,178 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2538): Flush status journal for 1b51fcb9c5cc43364334a31573ca489e: 2024-11-23T15:25:59,178 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e. 
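The RegionTooBusyException entries above come from HRegion.checkResources() once the region's memstore crosses its blocking limit (512.0 K in this run, presumably the deliberately small memstore flush size this test configures times the memstore block multiplier); mutations are rejected with that exception until the flush recorded here drains the memstore. A minimal client-side sketch of a writer absorbing this condition, assuming the same TestAcidGuarantees table and family A; the retry/pause settings and the extra application-level backoff are illustrative, not values taken from this run:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionWriter {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Illustrative client retry tuning; the HBase client's RpcRetryingCaller
    // already retries RegionTooBusyException with backoff using these settings.
    conf.setInt("hbase.client.retries.number", 10);
    conf.setLong("hbase.client.pause", 100);
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      for (int attempt = 0; ; attempt++) {
        try {
          table.put(put);
          break;
        } catch (IOException e) {
          // RegionTooBusyException is retryable; if the client's internal retry
          // budget is exhausted it surfaces here (typically wrapped in a
          // RetriesExhaustedException). Hypothetical extra backoff before giving up:
          if (attempt >= 5) {
            throw e;
          }
          Thread.sleep(200L * (attempt + 1));
        }
      }
    }
  }
}

The blocked writes clear on their own once the MemStoreFlusher entries above report the flush finished, which is why the log alternates between RegionTooBusyException warnings and flush progress.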
2024-11-23T15:25:59,178 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=109 2024-11-23T15:25:59,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4106): Remote procedure done, pid=109 2024-11-23T15:25:59,180 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=109, resume processing ppid=108 2024-11-23T15:25:59,180 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=109, ppid=108, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 3.0400 sec 2024-11-23T15:25:59,182 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=108, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=108, table=TestAcidGuarantees in 3.0440 sec 2024-11-23T15:25:59,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(8581): Flush requested on 1b51fcb9c5cc43364334a31573ca489e 2024-11-23T15:25:59,601 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 1b51fcb9c5cc43364334a31573ca489e 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-23T15:25:59,601 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1b51fcb9c5cc43364334a31573ca489e, store=A 2024-11-23T15:25:59,601 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:25:59,601 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1b51fcb9c5cc43364334a31573ca489e, store=B 2024-11-23T15:25:59,601 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:25:59,601 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1b51fcb9c5cc43364334a31573ca489e, store=C 2024-11-23T15:25:59,601 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:25:59,608 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241123ba36fd8f856d4486a70120f5b75ddd30_1b51fcb9c5cc43364334a31573ca489e is 50, key is test_row_0/A:col10/1732375559599/Put/seqid=0 2024-11-23T15:25:59,612 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742156_1332 (size=14594) 2024-11-23T15:25:59,645 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:59,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51386 deadline: 1732375619644, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:59,750 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:59,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51386 deadline: 1732375619747, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 2024-11-23T15:25:59,951 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:25:59,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51386 deadline: 1732375619951, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:00,013 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:00,017 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241123ba36fd8f856d4486a70120f5b75ddd30_1b51fcb9c5cc43364334a31573ca489e to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123ba36fd8f856d4486a70120f5b75ddd30_1b51fcb9c5cc43364334a31573ca489e 2024-11-23T15:26:00,018 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/A/4a2b4e8719584746a4edbb4a03beef8e, store: [table=TestAcidGuarantees family=A region=1b51fcb9c5cc43364334a31573ca489e] 2024-11-23T15:26:00,019 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/A/4a2b4e8719584746a4edbb4a03beef8e is 175, key is test_row_0/A:col10/1732375559599/Put/seqid=0 2024-11-23T15:26:00,023 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742157_1333 (size=39549) 2024-11-23T15:26:00,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-11-23T15:26:00,243 INFO [Thread-1416 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 108 completed 2024-11-23T15:26:00,244 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush 
TestAcidGuarantees 2024-11-23T15:26:00,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] procedure2.ProcedureExecutor(1098): Stored pid=110, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=110, table=TestAcidGuarantees 2024-11-23T15:26:00,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-11-23T15:26:00,245 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=110, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=110, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-23T15:26:00,246 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=110, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=110, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-23T15:26:00,246 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=111, ppid=110, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-23T15:26:00,257 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:00,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51386 deadline: 1732375620254, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:00,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-11-23T15:26:00,397 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:26:00,398 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=111 2024-11-23T15:26:00,398 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e. 2024-11-23T15:26:00,398 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e. as already flushing 2024-11-23T15:26:00,398 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e. 2024-11-23T15:26:00,398 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] handler.RSProcedureHandler(58): pid=111 java.io.IOException: Unable to complete flush {ENCODED => 1b51fcb9c5cc43364334a31573ca489e, NAME => 'TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
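The FlushTableProcedure/FlushRegionProcedure records above (pid=110 with subprocedure pid=111) are driven by an administrative flush request from the client ("Client=jenkins//172.17.0.2 flush TestAcidGuarantees"); the "Unable to complete flush ... as already flushing" errors only mean the region was mid-flush when the remote callable arrived, and the master keeps re-dispatching pid=111 until it succeeds. A minimal sketch of issuing such a flush from client code, assuming the same TestAcidGuarantees table (the Admin call is the standard API; everything else is illustrative):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTestTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Synchronous flush of the table; on this build the master executes it as a
      // FlushTableProcedure with a FlushRegionProcedure per region, as shown in
      // the procedure records logged above.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}

The earlier "Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 108 completed" line shows the corresponding client future returning only after the master-side procedure finished.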
2024-11-23T15:26:00,398 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=111 java.io.IOException: Unable to complete flush {ENCODED => 1b51fcb9c5cc43364334a31573ca489e, NAME => 'TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:00,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=111 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1b51fcb9c5cc43364334a31573ca489e, NAME => 'TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1b51fcb9c5cc43364334a31573ca489e, NAME => 'TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:00,423 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=118, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/A/4a2b4e8719584746a4edbb4a03beef8e 2024-11-23T15:26:00,430 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/B/39d72d3633ea4511bda6d019118fcbee is 50, key is test_row_0/B:col10/1732375559599/Put/seqid=0 2024-11-23T15:26:00,434 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742158_1334 (size=12001) 2024-11-23T15:26:00,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-11-23T15:26:00,550 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:26:00,551 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=111 2024-11-23T15:26:00,551 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e. 2024-11-23T15:26:00,551 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e. as already flushing 2024-11-23T15:26:00,551 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e. 2024-11-23T15:26:00,551 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] handler.RSProcedureHandler(58): pid=111 java.io.IOException: Unable to complete flush {ENCODED => 1b51fcb9c5cc43364334a31573ca489e, NAME => 'TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:00,551 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=111 java.io.IOException: Unable to complete flush {ENCODED => 1b51fcb9c5cc43364334a31573ca489e, NAME => 'TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:00,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=111 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1b51fcb9c5cc43364334a31573ca489e, NAME => 'TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1b51fcb9c5cc43364334a31573ca489e, NAME => 'TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:00,703 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:26:00,704 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=111 2024-11-23T15:26:00,704 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e. 2024-11-23T15:26:00,704 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e. as already flushing 2024-11-23T15:26:00,704 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e. 2024-11-23T15:26:00,704 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] handler.RSProcedureHandler(58): pid=111 java.io.IOException: Unable to complete flush {ENCODED => 1b51fcb9c5cc43364334a31573ca489e, NAME => 'TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:00,704 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=111 java.io.IOException: Unable to complete flush {ENCODED => 1b51fcb9c5cc43364334a31573ca489e, NAME => 'TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:00,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=111 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1b51fcb9c5cc43364334a31573ca489e, NAME => 'TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1b51fcb9c5cc43364334a31573ca489e, NAME => 'TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:00,766 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:00,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51386 deadline: 1732375620762, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:00,835 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=118 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/B/39d72d3633ea4511bda6d019118fcbee 2024-11-23T15:26:00,842 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/C/a41a00df0a8d456695fb2fd167d4d9cc is 50, key is test_row_0/C:col10/1732375559599/Put/seqid=0 2024-11-23T15:26:00,847 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742159_1335 (size=12001) 2024-11-23T15:26:00,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-11-23T15:26:00,856 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:26:00,856 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=111 2024-11-23T15:26:00,857 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e. 2024-11-23T15:26:00,857 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e. 
as already flushing 2024-11-23T15:26:00,857 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e. 2024-11-23T15:26:00,857 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] handler.RSProcedureHandler(58): pid=111 java.io.IOException: Unable to complete flush {ENCODED => 1b51fcb9c5cc43364334a31573ca489e, NAME => 'TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:00,857 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=111 java.io.IOException: Unable to complete flush {ENCODED => 1b51fcb9c5cc43364334a31573ca489e, NAME => 'TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:00,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=111 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1b51fcb9c5cc43364334a31573ca489e, NAME => 'TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1b51fcb9c5cc43364334a31573ca489e, NAME => 'TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:01,009 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:26:01,009 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=111 2024-11-23T15:26:01,009 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e. 2024-11-23T15:26:01,009 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e. as already flushing 2024-11-23T15:26:01,010 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e. 2024-11-23T15:26:01,010 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] handler.RSProcedureHandler(58): pid=111 java.io.IOException: Unable to complete flush {ENCODED => 1b51fcb9c5cc43364334a31573ca489e, NAME => 'TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:01,010 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=111 java.io.IOException: Unable to complete flush {ENCODED => 1b51fcb9c5cc43364334a31573ca489e, NAME => 'TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:01,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=111 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1b51fcb9c5cc43364334a31573ca489e, NAME => 'TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1b51fcb9c5cc43364334a31573ca489e, NAME => 'TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:01,162 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:26:01,162 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=111 2024-11-23T15:26:01,162 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e. 2024-11-23T15:26:01,163 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e. as already flushing 2024-11-23T15:26:01,163 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e. 2024-11-23T15:26:01,163 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] handler.RSProcedureHandler(58): pid=111 java.io.IOException: Unable to complete flush {ENCODED => 1b51fcb9c5cc43364334a31573ca489e, NAME => 'TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:01,163 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=111 java.io.IOException: Unable to complete flush {ENCODED => 1b51fcb9c5cc43364334a31573ca489e, NAME => 'TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:01,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=111 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1b51fcb9c5cc43364334a31573ca489e, NAME => 'TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1b51fcb9c5cc43364334a31573ca489e, NAME => 'TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:01,248 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=118 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/C/a41a00df0a8d456695fb2fd167d4d9cc 2024-11-23T15:26:01,252 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/A/4a2b4e8719584746a4edbb4a03beef8e as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/A/4a2b4e8719584746a4edbb4a03beef8e 2024-11-23T15:26:01,256 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/A/4a2b4e8719584746a4edbb4a03beef8e, entries=200, sequenceid=118, filesize=38.6 K 2024-11-23T15:26:01,257 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/B/39d72d3633ea4511bda6d019118fcbee as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/B/39d72d3633ea4511bda6d019118fcbee 2024-11-23T15:26:01,260 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/B/39d72d3633ea4511bda6d019118fcbee, entries=150, 
sequenceid=118, filesize=11.7 K 2024-11-23T15:26:01,261 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/C/a41a00df0a8d456695fb2fd167d4d9cc as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/C/a41a00df0a8d456695fb2fd167d4d9cc 2024-11-23T15:26:01,264 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/C/a41a00df0a8d456695fb2fd167d4d9cc, entries=150, sequenceid=118, filesize=11.7 K 2024-11-23T15:26:01,265 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 1b51fcb9c5cc43364334a31573ca489e in 1664ms, sequenceid=118, compaction requested=true 2024-11-23T15:26:01,265 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 1b51fcb9c5cc43364334a31573ca489e: 2024-11-23T15:26:01,265 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1b51fcb9c5cc43364334a31573ca489e:A, priority=-2147483648, current under compaction store size is 1 2024-11-23T15:26:01,265 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T15:26:01,265 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-23T15:26:01,265 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1b51fcb9c5cc43364334a31573ca489e:B, priority=-2147483648, current under compaction store size is 2 2024-11-23T15:26:01,265 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T15:26:01,265 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1b51fcb9c5cc43364334a31573ca489e:C, priority=-2147483648, current under compaction store size is 3 2024-11-23T15:26:01,265 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T15:26:01,265 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-23T15:26:01,266 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 141111 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-23T15:26:01,266 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48107 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-23T15:26:01,267 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] 
regionserver.HStore(1540): 1b51fcb9c5cc43364334a31573ca489e/A is initiating minor compaction (all files) 2024-11-23T15:26:01,267 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1540): 1b51fcb9c5cc43364334a31573ca489e/B is initiating minor compaction (all files) 2024-11-23T15:26:01,267 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1b51fcb9c5cc43364334a31573ca489e/A in TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e. 2024-11-23T15:26:01,267 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1b51fcb9c5cc43364334a31573ca489e/B in TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e. 2024-11-23T15:26:01,267 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/A/20973a57493249a193e17fa523f249e9, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/A/a6003b208ab94f5c82af3cc87eee28d4, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/A/eebeaf28e8e74499a98a3d64cb0e91a5, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/A/4a2b4e8719584746a4edbb4a03beef8e] into tmpdir=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp, totalSize=137.8 K 2024-11-23T15:26:01,267 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/B/40e3b21e7f534a5db6e6272a1cde58d5, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/B/f4367b1ed36e4fd3828e2b25d90184e4, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/B/0d998298572c420d91d7d8e05e2eb070, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/B/39d72d3633ea4511bda6d019118fcbee] into tmpdir=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp, totalSize=47.0 K 2024-11-23T15:26:01,267 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e. 2024-11-23T15:26:01,267 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e. 
files: [hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/A/20973a57493249a193e17fa523f249e9, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/A/a6003b208ab94f5c82af3cc87eee28d4, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/A/eebeaf28e8e74499a98a3d64cb0e91a5, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/A/4a2b4e8719584746a4edbb4a03beef8e] 2024-11-23T15:26:01,267 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting 40e3b21e7f534a5db6e6272a1cde58d5, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1732375554182 2024-11-23T15:26:01,267 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 20973a57493249a193e17fa523f249e9, keycount=150, bloomtype=ROW, size=30.3 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1732375554182 2024-11-23T15:26:01,267 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.Compactor(224): Compacting a6003b208ab94f5c82af3cc87eee28d4, keycount=200, bloomtype=ROW, size=38.6 K, encoding=NONE, compression=NONE, seqNum=79, earliestPutTs=1732375554818 2024-11-23T15:26:01,267 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting f4367b1ed36e4fd3828e2b25d90184e4, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=79, earliestPutTs=1732375554827 2024-11-23T15:26:01,268 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.Compactor(224): Compacting eebeaf28e8e74499a98a3d64cb0e91a5, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=93, earliestPutTs=1732375556236 2024-11-23T15:26:01,268 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting 0d998298572c420d91d7d8e05e2eb070, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=93, earliestPutTs=1732375556236 2024-11-23T15:26:01,268 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting 39d72d3633ea4511bda6d019118fcbee, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1732375558476 2024-11-23T15:26:01,268 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4a2b4e8719584746a4edbb4a03beef8e, keycount=200, bloomtype=ROW, size=38.6 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1732375558460 2024-11-23T15:26:01,275 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=1b51fcb9c5cc43364334a31573ca489e] 2024-11-23T15:26:01,276 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1b51fcb9c5cc43364334a31573ca489e#B#compaction#284 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-23T15:26:01,277 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/B/4305f6d5ed704e0c8061947a1ba3a6ef is 50, key is test_row_0/B:col10/1732375559599/Put/seqid=0 2024-11-23T15:26:01,278 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202411230e557c3816c04b8d9c6c5a708f05f7cb_1b51fcb9c5cc43364334a31573ca489e store=[table=TestAcidGuarantees family=A region=1b51fcb9c5cc43364334a31573ca489e] 2024-11-23T15:26:01,280 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202411230e557c3816c04b8d9c6c5a708f05f7cb_1b51fcb9c5cc43364334a31573ca489e, store=[table=TestAcidGuarantees family=A region=1b51fcb9c5cc43364334a31573ca489e] 2024-11-23T15:26:01,280 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411230e557c3816c04b8d9c6c5a708f05f7cb_1b51fcb9c5cc43364334a31573ca489e because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=1b51fcb9c5cc43364334a31573ca489e] 2024-11-23T15:26:01,281 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742160_1336 (size=12241) 2024-11-23T15:26:01,285 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742161_1337 (size=4469) 2024-11-23T15:26:01,315 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:26:01,315 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=111 2024-11-23T15:26:01,315 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e. 
2024-11-23T15:26:01,315 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2837): Flushing 1b51fcb9c5cc43364334a31573ca489e 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-23T15:26:01,316 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1b51fcb9c5cc43364334a31573ca489e, store=A 2024-11-23T15:26:01,316 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:26:01,316 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1b51fcb9c5cc43364334a31573ca489e, store=B 2024-11-23T15:26:01,316 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:26:01,316 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1b51fcb9c5cc43364334a31573ca489e, store=C 2024-11-23T15:26:01,316 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:26:01,323 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411236a5e2dcfe0c5402e958a2ca81b0211d6_1b51fcb9c5cc43364334a31573ca489e is 50, key is test_row_0/A:col10/1732375559643/Put/seqid=0 2024-11-23T15:26:01,327 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742162_1338 (size=12154) 2024-11-23T15:26:01,328 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:01,332 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411236a5e2dcfe0c5402e958a2ca81b0211d6_1b51fcb9c5cc43364334a31573ca489e to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411236a5e2dcfe0c5402e958a2ca81b0211d6_1b51fcb9c5cc43364334a31573ca489e 2024-11-23T15:26:01,333 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/A/314c64c0543940b1bbd462f14f8fc6ad, store: [table=TestAcidGuarantees family=A region=1b51fcb9c5cc43364334a31573ca489e] 2024-11-23T15:26:01,333 
DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/A/314c64c0543940b1bbd462f14f8fc6ad is 175, key is test_row_0/A:col10/1732375559643/Put/seqid=0 2024-11-23T15:26:01,337 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742163_1339 (size=30955) 2024-11-23T15:26:01,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-11-23T15:26:01,686 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1b51fcb9c5cc43364334a31573ca489e#A#compaction#285 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T15:26:01,687 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/A/218ae65b56684266b3f4aea4121ac3d8 is 175, key is test_row_0/A:col10/1732375559599/Put/seqid=0 2024-11-23T15:26:01,688 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/B/4305f6d5ed704e0c8061947a1ba3a6ef as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/B/4305f6d5ed704e0c8061947a1ba3a6ef 2024-11-23T15:26:01,692 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742164_1340 (size=31195) 2024-11-23T15:26:01,693 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 1b51fcb9c5cc43364334a31573ca489e/B of 1b51fcb9c5cc43364334a31573ca489e into 4305f6d5ed704e0c8061947a1ba3a6ef(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T15:26:01,693 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1b51fcb9c5cc43364334a31573ca489e: 2024-11-23T15:26:01,693 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e., storeName=1b51fcb9c5cc43364334a31573ca489e/B, priority=12, startTime=1732375561265; duration=0sec 2024-11-23T15:26:01,693 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T15:26:01,693 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1b51fcb9c5cc43364334a31573ca489e:B 2024-11-23T15:26:01,694 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-23T15:26:01,695 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48107 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-23T15:26:01,695 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1540): 1b51fcb9c5cc43364334a31573ca489e/C is initiating minor compaction (all files) 2024-11-23T15:26:01,695 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1b51fcb9c5cc43364334a31573ca489e/C in TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e. 2024-11-23T15:26:01,695 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/C/d4440cad78604a5d83a1630917b68d4b, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/C/00f10cb888a54e20a6ed651c29edf91d, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/C/35c84c22947b42bbb3ec8dd3ed41bda3, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/C/a41a00df0a8d456695fb2fd167d4d9cc] into tmpdir=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp, totalSize=47.0 K 2024-11-23T15:26:01,695 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting d4440cad78604a5d83a1630917b68d4b, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1732375554182 2024-11-23T15:26:01,696 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting 00f10cb888a54e20a6ed651c29edf91d, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=79, earliestPutTs=1732375554827 2024-11-23T15:26:01,696 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting 35c84c22947b42bbb3ec8dd3ed41bda3, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, 
compression=NONE, seqNum=93, earliestPutTs=1732375556236 2024-11-23T15:26:01,697 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting a41a00df0a8d456695fb2fd167d4d9cc, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1732375558476 2024-11-23T15:26:01,707 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1b51fcb9c5cc43364334a31573ca489e#C#compaction#287 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T15:26:01,708 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/C/44899797c75748c8a3d0dc8f3ae8d4c6 is 50, key is test_row_0/C:col10/1732375559599/Put/seqid=0 2024-11-23T15:26:01,712 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742165_1341 (size=12241) 2024-11-23T15:26:01,716 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/C/44899797c75748c8a3d0dc8f3ae8d4c6 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/C/44899797c75748c8a3d0dc8f3ae8d4c6 2024-11-23T15:26:01,721 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 1b51fcb9c5cc43364334a31573ca489e/C of 1b51fcb9c5cc43364334a31573ca489e into 44899797c75748c8a3d0dc8f3ae8d4c6(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T15:26:01,721 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1b51fcb9c5cc43364334a31573ca489e: 2024-11-23T15:26:01,721 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e., storeName=1b51fcb9c5cc43364334a31573ca489e/C, priority=12, startTime=1732375561265; duration=0sec 2024-11-23T15:26:01,722 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T15:26:01,722 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1b51fcb9c5cc43364334a31573ca489e:C 2024-11-23T15:26:01,738 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=129, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/A/314c64c0543940b1bbd462f14f8fc6ad 2024-11-23T15:26:01,745 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/B/621349a3dcb74aae9f9cbd14953ea086 is 50, key is test_row_0/B:col10/1732375559643/Put/seqid=0 2024-11-23T15:26:01,753 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742166_1342 (size=12001) 2024-11-23T15:26:01,754 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=129 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/B/621349a3dcb74aae9f9cbd14953ea086 2024-11-23T15:26:01,762 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/C/644e4db7ec9c49548e642024e600fb3f is 50, key is test_row_0/C:col10/1732375559643/Put/seqid=0 2024-11-23T15:26:01,766 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742167_1343 (size=12001) 2024-11-23T15:26:01,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(8581): Flush requested on 1b51fcb9c5cc43364334a31573ca489e 2024-11-23T15:26:01,778 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e. as already flushing 2024-11-23T15:26:01,909 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:01,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51386 deadline: 1732375621905, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:02,016 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:02,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51386 deadline: 1732375622010, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:02,097 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/A/218ae65b56684266b3f4aea4121ac3d8 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/A/218ae65b56684266b3f4aea4121ac3d8 2024-11-23T15:26:02,101 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 1b51fcb9c5cc43364334a31573ca489e/A of 1b51fcb9c5cc43364334a31573ca489e into 218ae65b56684266b3f4aea4121ac3d8(size=30.5 K), total size for store is 30.5 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T15:26:02,101 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1b51fcb9c5cc43364334a31573ca489e: 2024-11-23T15:26:02,101 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e., storeName=1b51fcb9c5cc43364334a31573ca489e/A, priority=12, startTime=1732375561265; duration=0sec 2024-11-23T15:26:02,101 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T15:26:02,101 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1b51fcb9c5cc43364334a31573ca489e:A 2024-11-23T15:26:02,167 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=129 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/C/644e4db7ec9c49548e642024e600fb3f 2024-11-23T15:26:02,171 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/A/314c64c0543940b1bbd462f14f8fc6ad as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/A/314c64c0543940b1bbd462f14f8fc6ad 2024-11-23T15:26:02,175 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/A/314c64c0543940b1bbd462f14f8fc6ad, entries=150, sequenceid=129, filesize=30.2 K 2024-11-23T15:26:02,176 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/B/621349a3dcb74aae9f9cbd14953ea086 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/B/621349a3dcb74aae9f9cbd14953ea086 2024-11-23T15:26:02,179 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/B/621349a3dcb74aae9f9cbd14953ea086, entries=150, sequenceid=129, filesize=11.7 K 2024-11-23T15:26:02,180 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/C/644e4db7ec9c49548e642024e600fb3f as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/C/644e4db7ec9c49548e642024e600fb3f 2024-11-23T15:26:02,183 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/C/644e4db7ec9c49548e642024e600fb3f, entries=150, sequenceid=129, filesize=11.7 K 2024-11-23T15:26:02,184 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 1b51fcb9c5cc43364334a31573ca489e in 869ms, sequenceid=129, compaction requested=false 2024-11-23T15:26:02,184 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2538): Flush status journal for 1b51fcb9c5cc43364334a31573ca489e: 2024-11-23T15:26:02,184 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e. 2024-11-23T15:26:02,184 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=111 2024-11-23T15:26:02,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4106): Remote procedure done, pid=111 2024-11-23T15:26:02,187 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=111, resume processing ppid=110 2024-11-23T15:26:02,187 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=111, ppid=110, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.9390 sec 2024-11-23T15:26:02,188 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=110, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=110, table=TestAcidGuarantees in 1.9430 sec 2024-11-23T15:26:02,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(8581): Flush requested on 1b51fcb9c5cc43364334a31573ca489e 2024-11-23T15:26:02,220 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 1b51fcb9c5cc43364334a31573ca489e 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-11-23T15:26:02,220 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1b51fcb9c5cc43364334a31573ca489e, store=A 2024-11-23T15:26:02,220 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:26:02,220 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1b51fcb9c5cc43364334a31573ca489e, store=B 2024-11-23T15:26:02,220 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:26:02,220 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
1b51fcb9c5cc43364334a31573ca489e, store=C 2024-11-23T15:26:02,220 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:26:02,228 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112390f89f2e1ece425f865de721874884d6_1b51fcb9c5cc43364334a31573ca489e is 50, key is test_row_0/A:col10/1732375562219/Put/seqid=0 2024-11-23T15:26:02,245 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742168_1344 (size=14794) 2024-11-23T15:26:02,251 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:02,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51386 deadline: 1732375622247, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:02,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-11-23T15:26:02,349 INFO [Thread-1416 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 110 completed 2024-11-23T15:26:02,350 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-23T15:26:02,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] procedure2.ProcedureExecutor(1098): Stored pid=112, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=112, table=TestAcidGuarantees 2024-11-23T15:26:02,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is 
done pid=112 2024-11-23T15:26:02,352 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=112, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=112, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-23T15:26:02,352 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=112, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=112, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-23T15:26:02,353 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=113, ppid=112, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-23T15:26:02,354 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:02,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51386 deadline: 1732375622352, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:02,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-11-23T15:26:02,504 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:26:02,505 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-11-23T15:26:02,505 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e. 
2024-11-23T15:26:02,505 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e. as already flushing 2024-11-23T15:26:02,505 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e. 2024-11-23T15:26:02,505 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => 1b51fcb9c5cc43364334a31573ca489e, NAME => 'TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:02,505 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => 1b51fcb9c5cc43364334a31573ca489e, NAME => 'TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T15:26:02,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1b51fcb9c5cc43364334a31573ca489e, NAME => 'TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1b51fcb9c5cc43364334a31573ca489e, NAME => 'TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:02,556 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:02,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51386 deadline: 1732375622555, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:02,645 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:02,650 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112390f89f2e1ece425f865de721874884d6_1b51fcb9c5cc43364334a31573ca489e to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112390f89f2e1ece425f865de721874884d6_1b51fcb9c5cc43364334a31573ca489e 2024-11-23T15:26:02,651 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/A/9769b8152127404cbec2c731d806e885, store: [table=TestAcidGuarantees family=A region=1b51fcb9c5cc43364334a31573ca489e] 2024-11-23T15:26:02,652 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/A/9769b8152127404cbec2c731d806e885 is 175, key is test_row_0/A:col10/1732375562219/Put/seqid=0 2024-11-23T15:26:02,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-11-23T15:26:02,657 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:26:02,658 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-11-23T15:26:02,658 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e. 2024-11-23T15:26:02,658 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e. 
as already flushing 2024-11-23T15:26:02,658 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e. 2024-11-23T15:26:02,658 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => 1b51fcb9c5cc43364334a31573ca489e, NAME => 'TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:02,658 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => 1b51fcb9c5cc43364334a31573ca489e, NAME => 'TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:02,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1b51fcb9c5cc43364334a31573ca489e, NAME => 'TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1b51fcb9c5cc43364334a31573ca489e, NAME => 'TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:02,678 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742169_1345 (size=39749) 2024-11-23T15:26:02,680 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=158, memsize=51.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/A/9769b8152127404cbec2c731d806e885 2024-11-23T15:26:02,687 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/B/953a3fa99bcd4d9e9800d5cdfdb434e9 is 50, key is test_row_0/B:col10/1732375562219/Put/seqid=0 2024-11-23T15:26:02,691 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742170_1346 (size=12151) 2024-11-23T15:26:02,810 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:26:02,811 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-11-23T15:26:02,811 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e. 
2024-11-23T15:26:02,811 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e. as already flushing 2024-11-23T15:26:02,811 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e. 2024-11-23T15:26:02,811 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => 1b51fcb9c5cc43364334a31573ca489e, NAME => 'TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:02,811 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => 1b51fcb9c5cc43364334a31573ca489e, NAME => 'TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T15:26:02,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1b51fcb9c5cc43364334a31573ca489e, NAME => 'TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1b51fcb9c5cc43364334a31573ca489e, NAME => 'TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:02,861 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:02,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51386 deadline: 1732375622858, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:02,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-11-23T15:26:02,963 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:26:02,963 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-11-23T15:26:02,964 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e. 2024-11-23T15:26:02,964 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e. as already flushing 2024-11-23T15:26:02,964 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e. 2024-11-23T15:26:02,964 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => 1b51fcb9c5cc43364334a31573ca489e, NAME => 'TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T15:26:02,964 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => 1b51fcb9c5cc43364334a31573ca489e, NAME => 'TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:02,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1b51fcb9c5cc43364334a31573ca489e, NAME => 'TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1b51fcb9c5cc43364334a31573ca489e, NAME => 'TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:02,987 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:02,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51348 deadline: 1732375622986, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:02,988 DEBUG [Thread-1408 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8156 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e., hostname=6a36843bf905,33811,1732375456985, seqNum=5, see https://s.apache.org/timeout, 
exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at 
org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at 
org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-23T15:26:02,992 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:02,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51378 deadline: 1732375622989, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:02,992 DEBUG [Thread-1412 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8166 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at 
org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e., hostname=6a36843bf905,33811,1732375456985, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at 
org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at 
org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-23T15:26:02,998 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:02,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51354 deadline: 1732375622996, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:02,999 DEBUG [Thread-1410 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8174 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at 
org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e., hostname=6a36843bf905,33811,1732375456985, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at 
org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at 
org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-23T15:26:03,019 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:03,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51328 deadline: 1732375623013, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:03,019 DEBUG [Thread-1414 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8189 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e., hostname=6a36843bf905,33811,1732375456985, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-23T15:26:03,092 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=158 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/B/953a3fa99bcd4d9e9800d5cdfdb434e9 2024-11-23T15:26:03,098 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/C/afc5f827da4b4cc4b8a05106ea5ba3ca is 
50, key is test_row_0/C:col10/1732375562219/Put/seqid=0 2024-11-23T15:26:03,102 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742171_1347 (size=12151) 2024-11-23T15:26:03,116 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:26:03,116 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-11-23T15:26:03,116 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e. 2024-11-23T15:26:03,116 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e. as already flushing 2024-11-23T15:26:03,116 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e. 2024-11-23T15:26:03,116 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => 1b51fcb9c5cc43364334a31573ca489e, NAME => 'TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:03,117 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => 1b51fcb9c5cc43364334a31573ca489e, NAME => 'TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:03,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1b51fcb9c5cc43364334a31573ca489e, NAME => 'TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1b51fcb9c5cc43364334a31573ca489e, NAME => 'TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:03,268 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:26:03,269 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-11-23T15:26:03,269 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e. 
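The flush attempts above are driven by a master-side flush procedure (the FlushTableProcedure/FlushRegionProcedure pair, pid=112/113, finished later in this log); while the region is still mid-flush, each dispatched FlushRegionCallable fails with "Unable to complete flush" and is simply retried. A minimal client-side sketch of requesting that same flush through the Admin API, assuming only a reachable cluster and the existing TestAcidGuarantees table:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Asks the master to flush every region of the table; if a region is
      // already flushing (the "NOT flushing ... as already flushing" entries
      // above), the per-region procedure is retried until the flush completes.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}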
2024-11-23T15:26:03,269 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e. as already flushing 2024-11-23T15:26:03,269 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e. 2024-11-23T15:26:03,269 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => 1b51fcb9c5cc43364334a31573ca489e, NAME => 'TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:03,269 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => 1b51fcb9c5cc43364334a31573ca489e, NAME => 'TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T15:26:03,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1b51fcb9c5cc43364334a31573ca489e, NAME => 'TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1b51fcb9c5cc43364334a31573ca489e, NAME => 'TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:03,368 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:03,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51386 deadline: 1732375623365, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:03,421 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:26:03,421 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-11-23T15:26:03,422 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e. 2024-11-23T15:26:03,422 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e. as already flushing 2024-11-23T15:26:03,422 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e. 2024-11-23T15:26:03,422 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => 1b51fcb9c5cc43364334a31573ca489e, NAME => 'TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
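The RegionTooBusyException entries above ("Over memstore limit=512.0 K") are returned to the client's Mutate call and retried internally by RpcRetryingCallerImpl (the "tries=7, retries=16" message earlier in this log). A minimal client-side sketch of that write path; the retry count and pause are illustrative assumptions, not the test's configuration:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionPutExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    conf.setInt("hbase.client.retries.number", 16); // mirrors retries=16 above
    conf.setLong("hbase.client.pause", 100);        // ms between attempts (illustrative)
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      try {
        // RegionTooBusyException is retried behind this call; it only reaches
        // the caller (wrapped in an IOException) once retries are exhausted.
        table.put(put);
      } catch (IOException e) {
        System.err.println("put failed after retries: " + e.getMessage());
      }
    }
  }
}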
2024-11-23T15:26:03,422 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => 1b51fcb9c5cc43364334a31573ca489e, NAME => 'TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:03,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1b51fcb9c5cc43364334a31573ca489e, NAME => 'TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1b51fcb9c5cc43364334a31573ca489e, NAME => 'TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:03,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-11-23T15:26:03,503 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=158 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/C/afc5f827da4b4cc4b8a05106ea5ba3ca 2024-11-23T15:26:03,507 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/A/9769b8152127404cbec2c731d806e885 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/A/9769b8152127404cbec2c731d806e885 2024-11-23T15:26:03,510 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/A/9769b8152127404cbec2c731d806e885, entries=200, sequenceid=158, filesize=38.8 K 2024-11-23T15:26:03,511 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/B/953a3fa99bcd4d9e9800d5cdfdb434e9 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/B/953a3fa99bcd4d9e9800d5cdfdb434e9 2024-11-23T15:26:03,515 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/B/953a3fa99bcd4d9e9800d5cdfdb434e9, entries=150, sequenceid=158, filesize=11.9 K 2024-11-23T15:26:03,515 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/C/afc5f827da4b4cc4b8a05106ea5ba3ca as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/C/afc5f827da4b4cc4b8a05106ea5ba3ca 2024-11-23T15:26:03,519 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/C/afc5f827da4b4cc4b8a05106ea5ba3ca, entries=150, sequenceid=158, filesize=11.9 K 2024-11-23T15:26:03,520 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for 1b51fcb9c5cc43364334a31573ca489e in 1300ms, sequenceid=158, compaction requested=true 2024-11-23T15:26:03,520 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 
1b51fcb9c5cc43364334a31573ca489e: 2024-11-23T15:26:03,520 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1b51fcb9c5cc43364334a31573ca489e:A, priority=-2147483648, current under compaction store size is 1 2024-11-23T15:26:03,520 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T15:26:03,520 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T15:26:03,520 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1b51fcb9c5cc43364334a31573ca489e:B, priority=-2147483648, current under compaction store size is 2 2024-11-23T15:26:03,520 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T15:26:03,520 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1b51fcb9c5cc43364334a31573ca489e:C, priority=-2147483648, current under compaction store size is 3 2024-11-23T15:26:03,520 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T15:26:03,520 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T15:26:03,521 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 101899 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T15:26:03,521 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HStore(1540): 1b51fcb9c5cc43364334a31573ca489e/A is initiating minor compaction (all files) 2024-11-23T15:26:03,521 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1b51fcb9c5cc43364334a31573ca489e/A in TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e. 
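The CompactSplit and ExploringCompactionPolicy entries above show each store selecting its 3 eligible files (with 16 files as the blocking threshold) for a minor compaction once the flush lands. A minimal sketch of the standard selection knobs and of requesting a compaction explicitly; the values shown are illustrative assumptions, not this test's settings:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class CompactionRequestExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    conf.setInt("hbase.hstore.compaction.min", 3);      // min store files before a minor compaction
    conf.setInt("hbase.hstore.blockingStoreFiles", 16); // writes stall above this many files
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Queue a compaction request for the table; the region server's
      // CompactSplit thread pools pick it up, as in the entries above.
      admin.compact(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}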
2024-11-23T15:26:03,521 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/A/218ae65b56684266b3f4aea4121ac3d8, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/A/314c64c0543940b1bbd462f14f8fc6ad, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/A/9769b8152127404cbec2c731d806e885] into tmpdir=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp, totalSize=99.5 K 2024-11-23T15:26:03,521 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e. 2024-11-23T15:26:03,521 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36393 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T15:26:03,521 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e. files: [hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/A/218ae65b56684266b3f4aea4121ac3d8, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/A/314c64c0543940b1bbd462f14f8fc6ad, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/A/9769b8152127404cbec2c731d806e885] 2024-11-23T15:26:03,522 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1540): 1b51fcb9c5cc43364334a31573ca489e/B is initiating minor compaction (all files) 2024-11-23T15:26:03,522 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1b51fcb9c5cc43364334a31573ca489e/B in TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e. 
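The DefaultMobStoreCompactor entries above indicate that family A of this test table is MOB-enabled. A minimal sketch of declaring such a family; the table name and threshold are hypothetical, chosen only for illustration:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class MobFamilyExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableDescriptorBuilder table =
          TableDescriptorBuilder.newBuilder(TableName.valueOf("MobExampleTable"));
      table.setColumnFamily(
          ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("A"))
              .setMobEnabled(true)       // store large cells in MOB files
              .setMobThreshold(100_000L) // cells above ~100 KB go to the MOB area (illustrative)
              .build());
      admin.createTable(table.build());
    }
  }
}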
2024-11-23T15:26:03,522 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/B/4305f6d5ed704e0c8061947a1ba3a6ef, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/B/621349a3dcb74aae9f9cbd14953ea086, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/B/953a3fa99bcd4d9e9800d5cdfdb434e9] into tmpdir=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp, totalSize=35.5 K 2024-11-23T15:26:03,522 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting 4305f6d5ed704e0c8061947a1ba3a6ef, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1732375558476 2024-11-23T15:26:03,522 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 218ae65b56684266b3f4aea4121ac3d8, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1732375558476 2024-11-23T15:26:03,523 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting 621349a3dcb74aae9f9cbd14953ea086, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=129, earliestPutTs=1732375559619 2024-11-23T15:26:03,523 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 314c64c0543940b1bbd462f14f8fc6ad, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=129, earliestPutTs=1732375559619 2024-11-23T15:26:03,523 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9769b8152127404cbec2c731d806e885, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=158, earliestPutTs=1732375561855 2024-11-23T15:26:03,523 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting 953a3fa99bcd4d9e9800d5cdfdb434e9, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=158, earliestPutTs=1732375561855 2024-11-23T15:26:03,529 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=1b51fcb9c5cc43364334a31573ca489e] 2024-11-23T15:26:03,530 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1b51fcb9c5cc43364334a31573ca489e#B#compaction#294 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-23T15:26:03,531 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/B/a2cedf4f5305426fbfa57764efd7515c is 50, key is test_row_0/B:col10/1732375562219/Put/seqid=0 2024-11-23T15:26:03,536 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241123218642bfbdd64735a74f0bbd34ba3970_1b51fcb9c5cc43364334a31573ca489e store=[table=TestAcidGuarantees family=A region=1b51fcb9c5cc43364334a31573ca489e] 2024-11-23T15:26:03,539 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241123218642bfbdd64735a74f0bbd34ba3970_1b51fcb9c5cc43364334a31573ca489e, store=[table=TestAcidGuarantees family=A region=1b51fcb9c5cc43364334a31573ca489e] 2024-11-23T15:26:03,539 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241123218642bfbdd64735a74f0bbd34ba3970_1b51fcb9c5cc43364334a31573ca489e because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=1b51fcb9c5cc43364334a31573ca489e] 2024-11-23T15:26:03,546 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742172_1348 (size=12493) 2024-11-23T15:26:03,554 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/B/a2cedf4f5305426fbfa57764efd7515c as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/B/a2cedf4f5305426fbfa57764efd7515c 2024-11-23T15:26:03,559 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742173_1349 (size=4469) 2024-11-23T15:26:03,563 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 1b51fcb9c5cc43364334a31573ca489e/B of 1b51fcb9c5cc43364334a31573ca489e into a2cedf4f5305426fbfa57764efd7515c(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
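The PressureAwareThroughputController entries above throttle compaction I/O against a 50.00 MB/second total limit. A minimal sketch of the lower/higher throughput bounds the controller interpolates between based on flush/compaction pressure; the byte values are illustrative assumptions, not verified defaults:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionThroughputExample {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // The allowed compaction throughput scales between these two bounds as
    // memstore/store-file pressure rises on the region server.
    conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
    conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
    System.out.println("lower bound = "
        + conf.getLong("hbase.hstore.compaction.throughput.lower.bound", 0) + " bytes/sec");
  }
}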
2024-11-23T15:26:03,563 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1b51fcb9c5cc43364334a31573ca489e: 2024-11-23T15:26:03,563 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e., storeName=1b51fcb9c5cc43364334a31573ca489e/B, priority=13, startTime=1732375563520; duration=0sec 2024-11-23T15:26:03,563 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T15:26:03,563 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1b51fcb9c5cc43364334a31573ca489e:B 2024-11-23T15:26:03,563 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T15:26:03,564 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36393 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T15:26:03,564 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1540): 1b51fcb9c5cc43364334a31573ca489e/C is initiating minor compaction (all files) 2024-11-23T15:26:03,564 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1b51fcb9c5cc43364334a31573ca489e/C in TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e. 2024-11-23T15:26:03,564 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/C/44899797c75748c8a3d0dc8f3ae8d4c6, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/C/644e4db7ec9c49548e642024e600fb3f, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/C/afc5f827da4b4cc4b8a05106ea5ba3ca] into tmpdir=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp, totalSize=35.5 K 2024-11-23T15:26:03,565 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting 44899797c75748c8a3d0dc8f3ae8d4c6, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1732375558476 2024-11-23T15:26:03,565 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting 644e4db7ec9c49548e642024e600fb3f, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=129, earliestPutTs=1732375559619 2024-11-23T15:26:03,566 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting afc5f827da4b4cc4b8a05106ea5ba3ca, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=158, earliestPutTs=1732375561855 2024-11-23T15:26:03,573 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
1b51fcb9c5cc43364334a31573ca489e#C#compaction#295 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-23T15:26:03,573 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/C/5daccf0f05cd4877a9170df96a585b50 is 50, key is test_row_0/C:col10/1732375562219/Put/seqid=0 2024-11-23T15:26:03,574 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:26:03,575 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-11-23T15:26:03,575 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e. 2024-11-23T15:26:03,575 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2837): Flushing 1b51fcb9c5cc43364334a31573ca489e 3/3 column families, dataSize=46.96 KB heapSize=123.80 KB 2024-11-23T15:26:03,575 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1b51fcb9c5cc43364334a31573ca489e, store=A 2024-11-23T15:26:03,576 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:26:03,576 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1b51fcb9c5cc43364334a31573ca489e, store=B 2024-11-23T15:26:03,576 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:26:03,576 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1b51fcb9c5cc43364334a31573ca489e, store=C 2024-11-23T15:26:03,576 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:26:03,597 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241123f7065cfe1cd24872aeea8f6855e60cde_1b51fcb9c5cc43364334a31573ca489e is 50, key is test_row_0/A:col10/1732375562223/Put/seqid=0 2024-11-23T15:26:03,601 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742174_1350 (size=12493) 2024-11-23T15:26:03,625 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742175_1351 (size=12304) 
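The repeated "Over memstore limit=512.0 K" warnings in this log come from HRegion.checkResources rejecting writes once a region's memstore exceeds the flush size times the block multiplier. A minimal sketch of those two settings; the 128 KB flush size is an assumption that, with a multiplier of 4, would produce the 512 KB blocking limit seen here:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreLimitExample {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024); // flush at 128 KB (assumed)
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);      // block writes at 4x flush size
    long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
        * conf.getInt("hbase.hregion.memstore.block.multiplier", 0);
    System.out.println("writes block once a region's memstore exceeds "
        + blockingLimit + " bytes");
  }
}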
2024-11-23T15:26:03,961 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1b51fcb9c5cc43364334a31573ca489e#A#compaction#293 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T15:26:03,962 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/A/16ee2c4e0b15424191e94197ba61affd is 175, key is test_row_0/A:col10/1732375562219/Put/seqid=0 2024-11-23T15:26:03,965 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742176_1352 (size=31447) 2024-11-23T15:26:04,010 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/C/5daccf0f05cd4877a9170df96a585b50 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/C/5daccf0f05cd4877a9170df96a585b50 2024-11-23T15:26:04,015 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 1b51fcb9c5cc43364334a31573ca489e/C of 1b51fcb9c5cc43364334a31573ca489e into 5daccf0f05cd4877a9170df96a585b50(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T15:26:04,015 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1b51fcb9c5cc43364334a31573ca489e: 2024-11-23T15:26:04,015 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e., storeName=1b51fcb9c5cc43364334a31573ca489e/C, priority=13, startTime=1732375563520; duration=0sec 2024-11-23T15:26:04,015 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T15:26:04,015 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1b51fcb9c5cc43364334a31573ca489e:C 2024-11-23T15:26:04,026 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:04,030 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241123f7065cfe1cd24872aeea8f6855e60cde_1b51fcb9c5cc43364334a31573ca489e to 
hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123f7065cfe1cd24872aeea8f6855e60cde_1b51fcb9c5cc43364334a31573ca489e 2024-11-23T15:26:04,031 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/A/5687745c558b4133b8e5db9b95727dce, store: [table=TestAcidGuarantees family=A region=1b51fcb9c5cc43364334a31573ca489e] 2024-11-23T15:26:04,032 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/A/5687745c558b4133b8e5db9b95727dce is 175, key is test_row_0/A:col10/1732375562223/Put/seqid=0 2024-11-23T15:26:04,049 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742177_1353 (size=31105) 2024-11-23T15:26:04,050 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=169, memsize=15.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/A/5687745c558b4133b8e5db9b95727dce 2024-11-23T15:26:04,057 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/B/37deadbdc3404824a7dc3d3232bfa2ba is 50, key is test_row_0/B:col10/1732375562223/Put/seqid=0 2024-11-23T15:26:04,077 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742178_1354 (size=12151) 2024-11-23T15:26:04,078 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=169 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/B/37deadbdc3404824a7dc3d3232bfa2ba 2024-11-23T15:26:04,087 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/C/0d2e4486f34543a8a5bfb15d2826685f is 50, key is test_row_0/C:col10/1732375562223/Put/seqid=0 2024-11-23T15:26:04,111 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742179_1355 (size=12151) 2024-11-23T15:26:04,371 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/A/16ee2c4e0b15424191e94197ba61affd as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/A/16ee2c4e0b15424191e94197ba61affd 2024-11-23T15:26:04,375 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 1b51fcb9c5cc43364334a31573ca489e/A of 1b51fcb9c5cc43364334a31573ca489e into 16ee2c4e0b15424191e94197ba61affd(size=30.7 K), total size for store is 30.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T15:26:04,375 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1b51fcb9c5cc43364334a31573ca489e: 2024-11-23T15:26:04,375 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e., storeName=1b51fcb9c5cc43364334a31573ca489e/A, priority=13, startTime=1732375563520; duration=0sec 2024-11-23T15:26:04,376 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T15:26:04,376 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1b51fcb9c5cc43364334a31573ca489e:A 2024-11-23T15:26:04,380 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e. as already flushing 2024-11-23T15:26:04,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(8581): Flush requested on 1b51fcb9c5cc43364334a31573ca489e 2024-11-23T15:26:04,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-11-23T15:26:04,488 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:04,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51386 deadline: 1732375624483, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:04,512 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=169 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/C/0d2e4486f34543a8a5bfb15d2826685f 2024-11-23T15:26:04,516 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/A/5687745c558b4133b8e5db9b95727dce as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/A/5687745c558b4133b8e5db9b95727dce 2024-11-23T15:26:04,519 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/A/5687745c558b4133b8e5db9b95727dce, entries=150, sequenceid=169, filesize=30.4 K 2024-11-23T15:26:04,520 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/B/37deadbdc3404824a7dc3d3232bfa2ba as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/B/37deadbdc3404824a7dc3d3232bfa2ba 2024-11-23T15:26:04,523 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/B/37deadbdc3404824a7dc3d3232bfa2ba, entries=150, sequenceid=169, filesize=11.9 K 2024-11-23T15:26:04,524 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/C/0d2e4486f34543a8a5bfb15d2826685f as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/C/0d2e4486f34543a8a5bfb15d2826685f 2024-11-23T15:26:04,527 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/C/0d2e4486f34543a8a5bfb15d2826685f, entries=150, sequenceid=169, filesize=11.9 K 2024-11-23T15:26:04,528 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(3040): Finished flush of dataSize ~46.96 KB/48090, heapSize ~123.75 KB/126720, currentSize=154.31 KB/158010 for 1b51fcb9c5cc43364334a31573ca489e in 953ms, sequenceid=169, compaction requested=false 2024-11-23T15:26:04,528 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2538): Flush status journal for 1b51fcb9c5cc43364334a31573ca489e: 2024-11-23T15:26:04,528 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e. 2024-11-23T15:26:04,528 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=113 2024-11-23T15:26:04,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4106): Remote procedure done, pid=113 2024-11-23T15:26:04,531 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=113, resume processing ppid=112 2024-11-23T15:26:04,531 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=113, ppid=112, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.1760 sec 2024-11-23T15:26:04,532 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=112, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=112, table=TestAcidGuarantees in 2.1810 sec 2024-11-23T15:26:04,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(8581): Flush requested on 1b51fcb9c5cc43364334a31573ca489e 2024-11-23T15:26:04,593 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 1b51fcb9c5cc43364334a31573ca489e 3/3 column families, dataSize=161.02 KB heapSize=422.63 KB 2024-11-23T15:26:04,593 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1b51fcb9c5cc43364334a31573ca489e, store=A 2024-11-23T15:26:04,594 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:26:04,594 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1b51fcb9c5cc43364334a31573ca489e, store=B 2024-11-23T15:26:04,594 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:26:04,594 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
1b51fcb9c5cc43364334a31573ca489e, store=C 2024-11-23T15:26:04,594 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:26:04,600 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112353a83cf0c32746729e4497a6f28224de_1b51fcb9c5cc43364334a31573ca489e is 50, key is test_row_0/A:col10/1732375564428/Put/seqid=0 2024-11-23T15:26:04,604 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742180_1356 (size=14794) 2024-11-23T15:26:04,605 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:04,608 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112353a83cf0c32746729e4497a6f28224de_1b51fcb9c5cc43364334a31573ca489e to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112353a83cf0c32746729e4497a6f28224de_1b51fcb9c5cc43364334a31573ca489e 2024-11-23T15:26:04,609 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/A/c5800bb88a3b411fb4b8d7e36516d41e, store: [table=TestAcidGuarantees family=A region=1b51fcb9c5cc43364334a31573ca489e] 2024-11-23T15:26:04,610 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/A/c5800bb88a3b411fb4b8d7e36516d41e is 175, key is test_row_0/A:col10/1732375564428/Put/seqid=0 2024-11-23T15:26:04,613 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742181_1357 (size=39749) 2024-11-23T15:26:04,614 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=198, memsize=53.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/A/c5800bb88a3b411fb4b8d7e36516d41e 2024-11-23T15:26:04,625 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:04,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51386 deadline: 1732375624621, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:04,636 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/B/e1dde2623e1948af8e77a8145a855e3e is 50, key is test_row_0/B:col10/1732375564428/Put/seqid=0 2024-11-23T15:26:04,649 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742182_1358 (size=12151) 2024-11-23T15:26:04,729 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:04,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51386 deadline: 1732375624727, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:04,934 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:04,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51386 deadline: 1732375624931, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:05,050 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=198 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/B/e1dde2623e1948af8e77a8145a855e3e 2024-11-23T15:26:05,057 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/C/3a90b096c3724c81b4445f43044b050c is 50, key is test_row_0/C:col10/1732375564428/Put/seqid=0 2024-11-23T15:26:05,063 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742183_1359 (size=12151) 2024-11-23T15:26:05,238 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:05,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51386 deadline: 1732375625235, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:05,464 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=198 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/C/3a90b096c3724c81b4445f43044b050c 2024-11-23T15:26:05,468 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/A/c5800bb88a3b411fb4b8d7e36516d41e as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/A/c5800bb88a3b411fb4b8d7e36516d41e 2024-11-23T15:26:05,471 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/A/c5800bb88a3b411fb4b8d7e36516d41e, entries=200, sequenceid=198, filesize=38.8 K 2024-11-23T15:26:05,472 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/B/e1dde2623e1948af8e77a8145a855e3e as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/B/e1dde2623e1948af8e77a8145a855e3e 2024-11-23T15:26:05,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,476 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/B/e1dde2623e1948af8e77a8145a855e3e, entries=150, sequenceid=198, filesize=11.9 K 2024-11-23T15:26:05,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,476 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/C/3a90b096c3724c81b4445f43044b050c as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/C/3a90b096c3724c81b4445f43044b050c 2024-11-23T15:26:05,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,479 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/C/3a90b096c3724c81b4445f43044b050c, entries=150, sequenceid=198, filesize=11.9 K 2024-11-23T15:26:05,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,480 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,480 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=40.25 KB/41220 for 1b51fcb9c5cc43364334a31573ca489e in 887ms, sequenceid=198, compaction requested=true 2024-11-23T15:26:05,480 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 1b51fcb9c5cc43364334a31573ca489e: 2024-11-23T15:26:05,480 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1b51fcb9c5cc43364334a31573ca489e:A, priority=-2147483648, current under compaction store size is 1 2024-11-23T15:26:05,480 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T15:26:05,480 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1b51fcb9c5cc43364334a31573ca489e:B, priority=-2147483648, current under compaction store size is 2 2024-11-23T15:26:05,480 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T15:26:05,480 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T15:26:05,480 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1b51fcb9c5cc43364334a31573ca489e:C, priority=-2147483648, current under compaction store size is 3 2024-11-23T15:26:05,480 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T15:26:05,480 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T15:26:05,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,481 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 102301 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T15:26:05,481 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36795 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T15:26:05,481 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1540): 1b51fcb9c5cc43364334a31573ca489e/B is initiating minor compaction (all files) 2024-11-23T15:26:05,481 DEBUG 
[RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HStore(1540): 1b51fcb9c5cc43364334a31573ca489e/A is initiating minor compaction (all files) 2024-11-23T15:26:05,481 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1b51fcb9c5cc43364334a31573ca489e/A in TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e. 2024-11-23T15:26:05,481 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1b51fcb9c5cc43364334a31573ca489e/B in TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e. 2024-11-23T15:26:05,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,481 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/A/16ee2c4e0b15424191e94197ba61affd, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/A/5687745c558b4133b8e5db9b95727dce, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/A/c5800bb88a3b411fb4b8d7e36516d41e] into tmpdir=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp, totalSize=99.9 K 2024-11-23T15:26:05,481 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/B/a2cedf4f5305426fbfa57764efd7515c, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/B/37deadbdc3404824a7dc3d3232bfa2ba, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/B/e1dde2623e1948af8e77a8145a855e3e] into tmpdir=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp, totalSize=35.9 K 2024-11-23T15:26:05,481 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e. 2024-11-23T15:26:05,482 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e. 
files: [hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/A/16ee2c4e0b15424191e94197ba61affd, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/A/5687745c558b4133b8e5db9b95727dce, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/A/c5800bb88a3b411fb4b8d7e36516d41e] 2024-11-23T15:26:05,482 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting a2cedf4f5305426fbfa57764efd7515c, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=158, earliestPutTs=1732375561855 2024-11-23T15:26:05,482 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 16ee2c4e0b15424191e94197ba61affd, keycount=150, bloomtype=ROW, size=30.7 K, encoding=NONE, compression=NONE, seqNum=158, earliestPutTs=1732375561855 2024-11-23T15:26:05,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,482 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting 37deadbdc3404824a7dc3d3232bfa2ba, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=169, earliestPutTs=1732375562223 2024-11-23T15:26:05,482 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5687745c558b4133b8e5db9b95727dce, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=169, earliestPutTs=1732375562223 2024-11-23T15:26:05,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,482 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting e1dde2623e1948af8e77a8145a855e3e, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=198, earliestPutTs=1732375564428 2024-11-23T15:26:05,483 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.Compactor(224): Compacting c5800bb88a3b411fb4b8d7e36516d41e, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=198, earliestPutTs=1732375564428 2024-11-23T15:26:05,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-23T15:26:05,501 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=1b51fcb9c5cc43364334a31573ca489e]
2024-11-23T15:26:05,502 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1b51fcb9c5cc43364334a31573ca489e#B#compaction#302 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second
2024-11-23T15:26:05,502 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/B/b9acb12d332645c981b15df86458afd9 is 50, key is test_row_0/B:col10/1732375564428/Put/seqid=0
2024-11-23T15:26:05,505 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202411232418a7df3fe74be685363237101644ab_1b51fcb9c5cc43364334a31573ca489e store=[table=TestAcidGuarantees family=A region=1b51fcb9c5cc43364334a31573ca489e]
2024-11-23T15:26:05,507 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742184_1360 (size=12595)
2024-11-23T15:26:05,507 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202411232418a7df3fe74be685363237101644ab_1b51fcb9c5cc43364334a31573ca489e, store=[table=TestAcidGuarantees family=A region=1b51fcb9c5cc43364334a31573ca489e]
2024-11-23T15:26:05,507 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411232418a7df3fe74be685363237101644ab_1b51fcb9c5cc43364334a31573ca489e because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=1b51fcb9c5cc43364334a31573ca489e]
2024-11-23T15:26:05,514 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742185_1361 (size=4469)
2024-11-23T15:26:05,515 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1b51fcb9c5cc43364334a31573ca489e#A#compaction#303 average throughput is 1.75 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-11-23T15:26:05,515 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/A/eb08d221c4834885a59bb705046db278 is 175, key is test_row_0/A:col10/1732375564428/Put/seqid=0
2024-11-23T15:26:05,520 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742186_1362 (size=31549)
2024-11-23T15:26:05,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-23T15:26:05,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
[last message repeated continuously, interleaved across RpcServer.default.FPBQ.Fifo handlers 0, 1 and 2 on port 33811, from 2024-11-23T15:26:05,596 through 2024-11-23T15:26:05,654]
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(8581): Flush requested on 1b51fcb9c5cc43364334a31573ca489e 
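The last entry above shows RPC handler 2 requesting a flush of region 1b51fcb9c5cc43364334a31573ca489e; the MemStoreFlusher.0 entries that follow carry it out for column families A, B and C. As a minimal sketch only, assuming an hbase-client dependency and a reachable cluster, and using a placeholder table name that is not taken from this log, the same kind of flush can also be requested explicitly through the client Admin API:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection connection = ConnectionFactory.createConnection(conf);
                 Admin admin = connection.getAdmin()) {
                // "SomeTestTable" is a placeholder; substitute the table that owns
                // region 1b51fcb9c5cc43364334a31573ca489e in this test run.
                // flush() asks the region servers to write the table's memstores to HFiles.
                admin.flush(TableName.valueOf("SomeTestTable"));
            }
        }
    }

Run against the same cluster, this would be expected to produce another "Flushing ... column families" sequence like the one that follows.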
2024-11-23T15:26:05,794 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 1b51fcb9c5cc43364334a31573ca489e 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB
2024-11-23T15:26:05,794 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1b51fcb9c5cc43364334a31573ca489e, store=A
2024-11-23T15:26:05,794 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-23T15:26:05,794 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1b51fcb9c5cc43364334a31573ca489e, store=B
2024-11-23T15:26:05,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-23T15:26:05,794 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-23T15:26:05,794 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1b51fcb9c5cc43364334a31573ca489e, store=C
2024-11-23T15:26:05,794 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-23T15:26:05,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-23T15:26:05,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-23T15:26:05,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-23T15:26:05,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-23T15:26:05,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-23T15:26:05,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-23T15:26:05,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-23T15:26:05,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-23T15:26:05,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,802 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411230ac289dcc12f417990c0ac65309c34a4_1b51fcb9c5cc43364334a31573ca489e is 50, key is test_row_0/A:col10/1732375564614/Put/seqid=0 2024-11-23T15:26:05,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,803 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,805 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,809 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742187_1363 (size=12304) 2024-11-23T15:26:05,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:05,912 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/B/b9acb12d332645c981b15df86458afd9 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/B/b9acb12d332645c981b15df86458afd9 2024-11-23T15:26:05,916 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 1b51fcb9c5cc43364334a31573ca489e/B of 1b51fcb9c5cc43364334a31573ca489e into b9acb12d332645c981b15df86458afd9(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T15:26:05,916 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1b51fcb9c5cc43364334a31573ca489e: 2024-11-23T15:26:05,916 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e., storeName=1b51fcb9c5cc43364334a31573ca489e/B, priority=13, startTime=1732375565480; duration=0sec 2024-11-23T15:26:05,916 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T15:26:05,916 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1b51fcb9c5cc43364334a31573ca489e:B 2024-11-23T15:26:05,916 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T15:26:05,918 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36795 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T15:26:05,918 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1540): 1b51fcb9c5cc43364334a31573ca489e/C is initiating minor compaction (all files) 2024-11-23T15:26:05,918 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1b51fcb9c5cc43364334a31573ca489e/C in TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e. 2024-11-23T15:26:05,918 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/C/5daccf0f05cd4877a9170df96a585b50, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/C/0d2e4486f34543a8a5bfb15d2826685f, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/C/3a90b096c3724c81b4445f43044b050c] into tmpdir=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp, totalSize=35.9 K 2024-11-23T15:26:05,919 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting 5daccf0f05cd4877a9170df96a585b50, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=158, earliestPutTs=1732375561855 2024-11-23T15:26:05,919 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting 0d2e4486f34543a8a5bfb15d2826685f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=169, earliestPutTs=1732375562223 2024-11-23T15:26:05,919 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting 3a90b096c3724c81b4445f43044b050c, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=198, earliestPutTs=1732375564428 2024-11-23T15:26:05,927 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/A/eb08d221c4834885a59bb705046db278 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/A/eb08d221c4834885a59bb705046db278 2024-11-23T15:26:05,933 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1b51fcb9c5cc43364334a31573ca489e#C#compaction#305 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T15:26:05,934 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 1b51fcb9c5cc43364334a31573ca489e/A of 1b51fcb9c5cc43364334a31573ca489e into eb08d221c4834885a59bb705046db278(size=30.8 K), total size for store is 30.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T15:26:05,934 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1b51fcb9c5cc43364334a31573ca489e: 2024-11-23T15:26:05,934 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e., storeName=1b51fcb9c5cc43364334a31573ca489e/A, priority=13, startTime=1732375565480; duration=0sec 2024-11-23T15:26:05,934 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T15:26:05,934 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1b51fcb9c5cc43364334a31573ca489e:A 2024-11-23T15:26:05,934 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/C/6f94ffeeb7fb448180c197757a170a57 is 50, key is test_row_0/C:col10/1732375564428/Put/seqid=0 2024-11-23T15:26:05,941 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742188_1364 (size=12595) 2024-11-23T15:26:05,948 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/C/6f94ffeeb7fb448180c197757a170a57 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/C/6f94ffeeb7fb448180c197757a170a57 2024-11-23T15:26:05,957 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 1b51fcb9c5cc43364334a31573ca489e/C of 1b51fcb9c5cc43364334a31573ca489e into 6f94ffeeb7fb448180c197757a170a57(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T15:26:05,957 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1b51fcb9c5cc43364334a31573ca489e: 2024-11-23T15:26:05,957 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e., storeName=1b51fcb9c5cc43364334a31573ca489e/C, priority=13, startTime=1732375565480; duration=0sec 2024-11-23T15:26:05,957 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T15:26:05,957 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1b51fcb9c5cc43364334a31573ca489e:C 2024-11-23T15:26:06,013 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:06,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 190 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51386 deadline: 1732375626008, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:06,117 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:06,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 192 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51386 deadline: 1732375626116, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:06,208 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:06,214 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411230ac289dcc12f417990c0ac65309c34a4_1b51fcb9c5cc43364334a31573ca489e to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411230ac289dcc12f417990c0ac65309c34a4_1b51fcb9c5cc43364334a31573ca489e 2024-11-23T15:26:06,214 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/A/71829370bbdf4f0d803db9e1f68db00c, store: [table=TestAcidGuarantees family=A region=1b51fcb9c5cc43364334a31573ca489e] 2024-11-23T15:26:06,215 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/A/71829370bbdf4f0d803db9e1f68db00c is 175, key is test_row_0/A:col10/1732375564614/Put/seqid=0 2024-11-23T15:26:06,219 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742189_1365 (size=31101) 2024-11-23T15:26:06,320 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:06,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 194 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51386 deadline: 1732375626318, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:06,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-11-23T15:26:06,456 INFO [Thread-1416 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 112 completed 2024-11-23T15:26:06,458 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-23T15:26:06,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] procedure2.ProcedureExecutor(1098): Stored pid=114, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=114, table=TestAcidGuarantees 2024-11-23T15:26:06,459 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=114, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=114, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-23T15:26:06,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-11-23T15:26:06,460 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=114, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=114, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-23T15:26:06,460 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=115, ppid=114, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-23T15:26:06,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-11-23T15:26:06,611 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:26:06,612 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class 
org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-11-23T15:26:06,612 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e. 2024-11-23T15:26:06,612 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e. as already flushing 2024-11-23T15:26:06,612 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e. 2024-11-23T15:26:06,612 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => 1b51fcb9c5cc43364334a31573ca489e, NAME => 'TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:06,613 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => 1b51fcb9c5cc43364334a31573ca489e, NAME => 'TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T15:26:06,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1b51fcb9c5cc43364334a31573ca489e, NAME => 'TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1b51fcb9c5cc43364334a31573ca489e, NAME => 'TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:06,620 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=209, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/A/71829370bbdf4f0d803db9e1f68db00c 2024-11-23T15:26:06,625 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:06,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 196 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51386 deadline: 1732375626623, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:06,627 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/B/ec644308aec04c958fe87304bce98fa0 is 50, key is test_row_0/B:col10/1732375564614/Put/seqid=0 2024-11-23T15:26:06,631 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742190_1366 (size=9757) 2024-11-23T15:26:06,631 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=209 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/B/ec644308aec04c958fe87304bce98fa0 2024-11-23T15:26:06,639 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/C/daea28aece32411ab826bd889b346eb9 is 50, key is test_row_0/C:col10/1732375564614/Put/seqid=0 2024-11-23T15:26:06,643 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742191_1367 (size=9757) 2024-11-23T15:26:06,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-11-23T15:26:06,765 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:26:06,765 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-11-23T15:26:06,765 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on 
TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e. 2024-11-23T15:26:06,766 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e. as already flushing 2024-11-23T15:26:06,766 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e. 2024-11-23T15:26:06,766 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => 1b51fcb9c5cc43364334a31573ca489e, NAME => 'TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:06,766 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => 1b51fcb9c5cc43364334a31573ca489e, NAME => 'TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T15:26:06,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1b51fcb9c5cc43364334a31573ca489e, NAME => 'TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1b51fcb9c5cc43364334a31573ca489e, NAME => 'TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:06,918 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:26:06,918 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-11-23T15:26:06,918 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e. 2024-11-23T15:26:06,918 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e. 
as already flushing 2024-11-23T15:26:06,918 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e. 2024-11-23T15:26:06,918 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => 1b51fcb9c5cc43364334a31573ca489e, NAME => 'TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:06,919 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => 1b51fcb9c5cc43364334a31573ca489e, NAME => 'TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:06,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 1b51fcb9c5cc43364334a31573ca489e, NAME => 'TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 1b51fcb9c5cc43364334a31573ca489e, NAME => 'TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:07,043 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=209 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/C/daea28aece32411ab826bd889b346eb9 2024-11-23T15:26:07,047 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/A/71829370bbdf4f0d803db9e1f68db00c as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/A/71829370bbdf4f0d803db9e1f68db00c 2024-11-23T15:26:07,051 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/A/71829370bbdf4f0d803db9e1f68db00c, entries=150, sequenceid=209, filesize=30.4 K 2024-11-23T15:26:07,051 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/B/ec644308aec04c958fe87304bce98fa0 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/B/ec644308aec04c958fe87304bce98fa0 2024-11-23T15:26:07,054 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/B/ec644308aec04c958fe87304bce98fa0, entries=100, sequenceid=209, filesize=9.5 K 2024-11-23T15:26:07,055 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/C/daea28aece32411ab826bd889b346eb9 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/C/daea28aece32411ab826bd889b346eb9 2024-11-23T15:26:07,058 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/C/daea28aece32411ab826bd889b346eb9, entries=100, sequenceid=209, filesize=9.5 K 2024-11-23T15:26:07,059 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 1b51fcb9c5cc43364334a31573ca489e in 1266ms, sequenceid=209, compaction requested=false 2024-11-23T15:26:07,059 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 1b51fcb9c5cc43364334a31573ca489e: 2024-11-23T15:26:07,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-11-23T15:26:07,070 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:26:07,071 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-11-23T15:26:07,071 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e. 
2024-11-23T15:26:07,071 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2837): Flushing 1b51fcb9c5cc43364334a31573ca489e 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-23T15:26:07,071 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1b51fcb9c5cc43364334a31573ca489e, store=A 2024-11-23T15:26:07,072 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:26:07,072 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1b51fcb9c5cc43364334a31573ca489e, store=B 2024-11-23T15:26:07,072 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:26:07,072 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1b51fcb9c5cc43364334a31573ca489e, store=C 2024-11-23T15:26:07,072 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:26:07,078 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241123a69c46b4e35c4ff29bfa43eee7b7004a_1b51fcb9c5cc43364334a31573ca489e is 50, key is test_row_0/A:col10/1732375565958/Put/seqid=0 2024-11-23T15:26:07,081 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742192_1368 (size=12304) 2024-11-23T15:26:07,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(8581): Flush requested on 1b51fcb9c5cc43364334a31573ca489e 2024-11-23T15:26:07,130 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e. as already flushing 2024-11-23T15:26:07,170 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:07,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 206 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51386 deadline: 1732375627163, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:07,274 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:07,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 208 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51386 deadline: 1732375627272, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:07,479 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:07,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 210 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51386 deadline: 1732375627476, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:07,482 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:07,488 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241123a69c46b4e35c4ff29bfa43eee7b7004a_1b51fcb9c5cc43364334a31573ca489e to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123a69c46b4e35c4ff29bfa43eee7b7004a_1b51fcb9c5cc43364334a31573ca489e 2024-11-23T15:26:07,489 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/A/8010aebc23a248419b85fea04865bb60, store: [table=TestAcidGuarantees family=A region=1b51fcb9c5cc43364334a31573ca489e] 2024-11-23T15:26:07,490 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/A/8010aebc23a248419b85fea04865bb60 is 175, key is test_row_0/A:col10/1732375565958/Put/seqid=0 2024-11-23T15:26:07,508 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742193_1369 (size=31105) 2024-11-23T15:26:07,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-11-23T15:26:07,784 WARN 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:07,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 212 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51386 deadline: 1732375627782, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:07,909 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=237, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/A/8010aebc23a248419b85fea04865bb60 2024-11-23T15:26:07,916 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/B/df604f9aeaeb4a3e906d5226b4fb5c7a is 50, key is test_row_0/B:col10/1732375565958/Put/seqid=0 2024-11-23T15:26:07,920 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742194_1370 (size=12151) 2024-11-23T15:26:08,289 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:08,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 214 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51386 deadline: 1732375628286, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:08,321 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=237 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/B/df604f9aeaeb4a3e906d5226b4fb5c7a 2024-11-23T15:26:08,328 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/C/0b07adbe2baa499189da1f3fc1b75e4e is 50, key is test_row_0/C:col10/1732375565958/Put/seqid=0 2024-11-23T15:26:08,332 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742195_1371 (size=12151) 2024-11-23T15:26:08,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-11-23T15:26:08,733 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=237 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/C/0b07adbe2baa499189da1f3fc1b75e4e 2024-11-23T15:26:08,738 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/A/8010aebc23a248419b85fea04865bb60 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/A/8010aebc23a248419b85fea04865bb60 2024-11-23T15:26:08,742 INFO 
[RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/A/8010aebc23a248419b85fea04865bb60, entries=150, sequenceid=237, filesize=30.4 K 2024-11-23T15:26:08,742 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/B/df604f9aeaeb4a3e906d5226b4fb5c7a as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/B/df604f9aeaeb4a3e906d5226b4fb5c7a 2024-11-23T15:26:08,746 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/B/df604f9aeaeb4a3e906d5226b4fb5c7a, entries=150, sequenceid=237, filesize=11.9 K 2024-11-23T15:26:08,746 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/C/0b07adbe2baa499189da1f3fc1b75e4e as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/C/0b07adbe2baa499189da1f3fc1b75e4e 2024-11-23T15:26:08,750 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/C/0b07adbe2baa499189da1f3fc1b75e4e, entries=150, sequenceid=237, filesize=11.9 K 2024-11-23T15:26:08,751 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 1b51fcb9c5cc43364334a31573ca489e in 1680ms, sequenceid=237, compaction requested=true 2024-11-23T15:26:08,751 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2538): Flush status journal for 1b51fcb9c5cc43364334a31573ca489e: 2024-11-23T15:26:08,751 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e. 
2024-11-23T15:26:08,751 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=115 2024-11-23T15:26:08,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4106): Remote procedure done, pid=115 2024-11-23T15:26:08,753 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=115, resume processing ppid=114 2024-11-23T15:26:08,753 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=115, ppid=114, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.2920 sec 2024-11-23T15:26:08,754 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=114, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=114, table=TestAcidGuarantees in 2.2960 sec 2024-11-23T15:26:09,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(8581): Flush requested on 1b51fcb9c5cc43364334a31573ca489e 2024-11-23T15:26:09,294 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 1b51fcb9c5cc43364334a31573ca489e 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-23T15:26:09,294 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1b51fcb9c5cc43364334a31573ca489e, store=A 2024-11-23T15:26:09,294 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:26:09,294 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1b51fcb9c5cc43364334a31573ca489e, store=B 2024-11-23T15:26:09,294 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:26:09,294 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1b51fcb9c5cc43364334a31573ca489e, store=C 2024-11-23T15:26:09,294 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:26:09,301 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241123d06a82892c574f9b88bdc6973eaa900e_1b51fcb9c5cc43364334a31573ca489e is 50, key is test_row_0/A:col10/1732375569292/Put/seqid=0 2024-11-23T15:26:09,305 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742196_1372 (size=14794) 2024-11-23T15:26:09,400 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:09,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 238 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51386 deadline: 1732375629396, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:09,502 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:09,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 240 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51386 deadline: 1732375629502, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:09,706 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:09,707 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:09,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 242 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51386 deadline: 1732375629704, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:09,709 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241123d06a82892c574f9b88bdc6973eaa900e_1b51fcb9c5cc43364334a31573ca489e to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123d06a82892c574f9b88bdc6973eaa900e_1b51fcb9c5cc43364334a31573ca489e 2024-11-23T15:26:09,710 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/A/d58e05eeb86148649d9528dc1c797b83, store: [table=TestAcidGuarantees family=A region=1b51fcb9c5cc43364334a31573ca489e] 2024-11-23T15:26:09,711 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/A/d58e05eeb86148649d9528dc1c797b83 is 175, key is test_row_0/A:col10/1732375569292/Put/seqid=0 2024-11-23T15:26:09,715 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742197_1373 (size=39749) 2024-11-23T15:26:10,009 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:10,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 244 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51386 deadline: 1732375630008, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:10,116 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=249, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/A/d58e05eeb86148649d9528dc1c797b83 2024-11-23T15:26:10,123 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/B/2ed2c3bb62054f6d96ff65cd366d2f02 is 50, key is test_row_0/B:col10/1732375569292/Put/seqid=0 2024-11-23T15:26:10,127 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742198_1374 (size=12151) 2024-11-23T15:26:10,127 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=249 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/B/2ed2c3bb62054f6d96ff65cd366d2f02 2024-11-23T15:26:10,134 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/C/3d694fdcfe5f405d88b7bbbc402c71e0 is 50, key is test_row_0/C:col10/1732375569292/Put/seqid=0 2024-11-23T15:26:10,140 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742199_1375 (size=12151) 2024-11-23T15:26:10,516 WARN 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:10,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 246 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51386 deadline: 1732375630514, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:10,541 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=249 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/C/3d694fdcfe5f405d88b7bbbc402c71e0 2024-11-23T15:26:10,545 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/A/d58e05eeb86148649d9528dc1c797b83 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/A/d58e05eeb86148649d9528dc1c797b83 2024-11-23T15:26:10,548 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/A/d58e05eeb86148649d9528dc1c797b83, entries=200, sequenceid=249, filesize=38.8 K 2024-11-23T15:26:10,549 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/B/2ed2c3bb62054f6d96ff65cd366d2f02 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/B/2ed2c3bb62054f6d96ff65cd366d2f02 2024-11-23T15:26:10,553 INFO [MemStoreFlusher.0 {}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/B/2ed2c3bb62054f6d96ff65cd366d2f02, entries=150, sequenceid=249, filesize=11.9 K 2024-11-23T15:26:10,553 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/C/3d694fdcfe5f405d88b7bbbc402c71e0 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/C/3d694fdcfe5f405d88b7bbbc402c71e0 2024-11-23T15:26:10,557 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/C/3d694fdcfe5f405d88b7bbbc402c71e0, entries=150, sequenceid=249, filesize=11.9 K 2024-11-23T15:26:10,558 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 1b51fcb9c5cc43364334a31573ca489e in 1264ms, sequenceid=249, compaction requested=true 2024-11-23T15:26:10,558 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 1b51fcb9c5cc43364334a31573ca489e: 2024-11-23T15:26:10,558 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1b51fcb9c5cc43364334a31573ca489e:A, priority=-2147483648, current under compaction store size is 1 2024-11-23T15:26:10,558 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T15:26:10,558 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-23T15:26:10,558 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1b51fcb9c5cc43364334a31573ca489e:B, priority=-2147483648, current under compaction store size is 2 2024-11-23T15:26:10,558 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T15:26:10,558 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-23T15:26:10,558 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1b51fcb9c5cc43364334a31573ca489e:C, priority=-2147483648, current under compaction store size is 3 2024-11-23T15:26:10,558 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T15:26:10,559 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 46654 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-23T15:26:10,559 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] 
compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 133504 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-23T15:26:10,559 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HStore(1540): 1b51fcb9c5cc43364334a31573ca489e/B is initiating minor compaction (all files) 2024-11-23T15:26:10,559 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1540): 1b51fcb9c5cc43364334a31573ca489e/A is initiating minor compaction (all files) 2024-11-23T15:26:10,559 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1b51fcb9c5cc43364334a31573ca489e/B in TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e. 2024-11-23T15:26:10,559 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1b51fcb9c5cc43364334a31573ca489e/A in TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e. 2024-11-23T15:26:10,559 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/B/b9acb12d332645c981b15df86458afd9, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/B/ec644308aec04c958fe87304bce98fa0, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/B/df604f9aeaeb4a3e906d5226b4fb5c7a, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/B/2ed2c3bb62054f6d96ff65cd366d2f02] into tmpdir=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp, totalSize=45.6 K 2024-11-23T15:26:10,559 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/A/eb08d221c4834885a59bb705046db278, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/A/71829370bbdf4f0d803db9e1f68db00c, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/A/8010aebc23a248419b85fea04865bb60, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/A/d58e05eeb86148649d9528dc1c797b83] into tmpdir=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp, totalSize=130.4 K 2024-11-23T15:26:10,559 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e. 
2024-11-23T15:26:10,559 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e. files: [hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/A/eb08d221c4834885a59bb705046db278, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/A/71829370bbdf4f0d803db9e1f68db00c, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/A/8010aebc23a248419b85fea04865bb60, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/A/d58e05eeb86148649d9528dc1c797b83] 2024-11-23T15:26:10,560 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.Compactor(224): Compacting b9acb12d332645c981b15df86458afd9, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=198, earliestPutTs=1732375564428 2024-11-23T15:26:10,560 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting eb08d221c4834885a59bb705046db278, keycount=150, bloomtype=ROW, size=30.8 K, encoding=NONE, compression=NONE, seqNum=198, earliestPutTs=1732375564428 2024-11-23T15:26:10,560 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.Compactor(224): Compacting ec644308aec04c958fe87304bce98fa0, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=209, earliestPutTs=1732375564614 2024-11-23T15:26:10,560 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting 71829370bbdf4f0d803db9e1f68db00c, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=209, earliestPutTs=1732375564614 2024-11-23T15:26:10,560 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.Compactor(224): Compacting df604f9aeaeb4a3e906d5226b4fb5c7a, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=237, earliestPutTs=1732375565958 2024-11-23T15:26:10,560 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting 8010aebc23a248419b85fea04865bb60, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=237, earliestPutTs=1732375565958 2024-11-23T15:26:10,560 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2ed2c3bb62054f6d96ff65cd366d2f02, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=249, earliestPutTs=1732375567146 2024-11-23T15:26:10,561 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting d58e05eeb86148649d9528dc1c797b83, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=249, earliestPutTs=1732375567146 2024-11-23T15:26:10,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-11-23T15:26:10,565 INFO [Thread-1416 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 114 completed 2024-11-23T15:26:10,566 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster$22(4386): 
Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-23T15:26:10,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] procedure2.ProcedureExecutor(1098): Stored pid=116, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=116, table=TestAcidGuarantees 2024-11-23T15:26:10,569 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=1b51fcb9c5cc43364334a31573ca489e] 2024-11-23T15:26:10,569 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1b51fcb9c5cc43364334a31573ca489e#B#compaction#314 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-23T15:26:10,570 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/B/fb58ec12f70a48f485fcc8feeffda6c4 is 50, key is test_row_0/B:col10/1732375569292/Put/seqid=0 2024-11-23T15:26:10,571 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=116, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=116, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-23T15:26:10,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-11-23T15:26:10,572 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=116, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=116, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-23T15:26:10,572 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=117, ppid=116, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-23T15:26:10,576 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241123bea4631e337a48bb844c6f42063ba3d7_1b51fcb9c5cc43364334a31573ca489e store=[table=TestAcidGuarantees family=A region=1b51fcb9c5cc43364334a31573ca489e] 2024-11-23T15:26:10,579 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241123bea4631e337a48bb844c6f42063ba3d7_1b51fcb9c5cc43364334a31573ca489e, store=[table=TestAcidGuarantees family=A region=1b51fcb9c5cc43364334a31573ca489e] 2024-11-23T15:26:10,579 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241123bea4631e337a48bb844c6f42063ba3d7_1b51fcb9c5cc43364334a31573ca489e because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=1b51fcb9c5cc43364334a31573ca489e] 2024-11-23T15:26:10,587 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742201_1377 (size=4469) 
2024-11-23T15:26:10,587 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742200_1376 (size=12731) 2024-11-23T15:26:10,589 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1b51fcb9c5cc43364334a31573ca489e#A#compaction#315 average throughput is 1.22 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T15:26:10,590 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/A/c0ebf57ede8c49e4b2733d76e2cc00e6 is 175, key is test_row_0/A:col10/1732375569292/Put/seqid=0 2024-11-23T15:26:10,594 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742202_1378 (size=31685) 2024-11-23T15:26:10,594 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/B/fb58ec12f70a48f485fcc8feeffda6c4 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/B/fb58ec12f70a48f485fcc8feeffda6c4 2024-11-23T15:26:10,599 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/A/c0ebf57ede8c49e4b2733d76e2cc00e6 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/A/c0ebf57ede8c49e4b2733d76e2cc00e6 2024-11-23T15:26:10,599 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 1b51fcb9c5cc43364334a31573ca489e/B of 1b51fcb9c5cc43364334a31573ca489e into fb58ec12f70a48f485fcc8feeffda6c4(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T15:26:10,599 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1b51fcb9c5cc43364334a31573ca489e: 2024-11-23T15:26:10,599 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e., storeName=1b51fcb9c5cc43364334a31573ca489e/B, priority=12, startTime=1732375570558; duration=0sec 2024-11-23T15:26:10,599 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T15:26:10,600 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1b51fcb9c5cc43364334a31573ca489e:B 2024-11-23T15:26:10,600 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-23T15:26:10,601 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 46654 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-23T15:26:10,601 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HStore(1540): 1b51fcb9c5cc43364334a31573ca489e/C is initiating minor compaction (all files) 2024-11-23T15:26:10,601 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 1b51fcb9c5cc43364334a31573ca489e/C in TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e. 2024-11-23T15:26:10,602 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/C/6f94ffeeb7fb448180c197757a170a57, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/C/daea28aece32411ab826bd889b346eb9, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/C/0b07adbe2baa499189da1f3fc1b75e4e, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/C/3d694fdcfe5f405d88b7bbbc402c71e0] into tmpdir=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp, totalSize=45.6 K 2024-11-23T15:26:10,602 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6f94ffeeb7fb448180c197757a170a57, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=198, earliestPutTs=1732375564428 2024-11-23T15:26:10,602 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.Compactor(224): Compacting daea28aece32411ab826bd889b346eb9, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=209, earliestPutTs=1732375564614 2024-11-23T15:26:10,603 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0b07adbe2baa499189da1f3fc1b75e4e, keycount=150, bloomtype=ROW, size=11.9 K, 
encoding=NONE, compression=NONE, seqNum=237, earliestPutTs=1732375565958 2024-11-23T15:26:10,603 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3d694fdcfe5f405d88b7bbbc402c71e0, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=249, earliestPutTs=1732375567146 2024-11-23T15:26:10,604 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 1b51fcb9c5cc43364334a31573ca489e/A of 1b51fcb9c5cc43364334a31573ca489e into c0ebf57ede8c49e4b2733d76e2cc00e6(size=30.9 K), total size for store is 30.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T15:26:10,604 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1b51fcb9c5cc43364334a31573ca489e: 2024-11-23T15:26:10,604 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e., storeName=1b51fcb9c5cc43364334a31573ca489e/A, priority=12, startTime=1732375570558; duration=0sec 2024-11-23T15:26:10,604 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T15:26:10,604 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1b51fcb9c5cc43364334a31573ca489e:A 2024-11-23T15:26:10,611 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1b51fcb9c5cc43364334a31573ca489e#C#compaction#316 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T15:26:10,611 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/C/232920696c0248118edaeab0b6253662 is 50, key is test_row_0/C:col10/1732375569292/Put/seqid=0 2024-11-23T15:26:10,615 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742203_1379 (size=12731) 2024-11-23T15:26:10,620 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/C/232920696c0248118edaeab0b6253662 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/C/232920696c0248118edaeab0b6253662 2024-11-23T15:26:10,624 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 1b51fcb9c5cc43364334a31573ca489e/C of 1b51fcb9c5cc43364334a31573ca489e into 232920696c0248118edaeab0b6253662(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T15:26:10,624 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 1b51fcb9c5cc43364334a31573ca489e: 2024-11-23T15:26:10,624 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e., storeName=1b51fcb9c5cc43364334a31573ca489e/C, priority=12, startTime=1732375570558; duration=0sec 2024-11-23T15:26:10,625 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T15:26:10,625 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1b51fcb9c5cc43364334a31573ca489e:C 2024-11-23T15:26:10,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-11-23T15:26:10,724 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:26:10,724 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=117 2024-11-23T15:26:10,724 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e. 2024-11-23T15:26:10,724 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(2837): Flushing 1b51fcb9c5cc43364334a31573ca489e 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-23T15:26:10,725 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1b51fcb9c5cc43364334a31573ca489e, store=A 2024-11-23T15:26:10,725 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:26:10,725 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1b51fcb9c5cc43364334a31573ca489e, store=B 2024-11-23T15:26:10,725 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:26:10,725 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1b51fcb9c5cc43364334a31573ca489e, store=C 2024-11-23T15:26:10,725 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:26:10,732 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411235d440d7cb78a495f80092adff0351df7_1b51fcb9c5cc43364334a31573ca489e is 50, key is test_row_0/A:col10/1732375569360/Put/seqid=0 2024-11-23T15:26:10,738 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742204_1380 (size=12454) 2024-11-23T15:26:10,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-11-23T15:26:11,139 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:11,142 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411235d440d7cb78a495f80092adff0351df7_1b51fcb9c5cc43364334a31573ca489e to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411235d440d7cb78a495f80092adff0351df7_1b51fcb9c5cc43364334a31573ca489e 2024-11-23T15:26:11,143 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/A/17cfd739baf74afe8c0f230b5faf3b50, store: [table=TestAcidGuarantees family=A region=1b51fcb9c5cc43364334a31573ca489e] 2024-11-23T15:26:11,144 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/A/17cfd739baf74afe8c0f230b5faf3b50 is 175, key is test_row_0/A:col10/1732375569360/Put/seqid=0 2024-11-23T15:26:11,148 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742205_1381 (size=31255) 2024-11-23T15:26:11,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-11-23T15:26:11,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(8581): Flush requested on 1b51fcb9c5cc43364334a31573ca489e 2024-11-23T15:26:11,525 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e. 
as already flushing 2024-11-23T15:26:11,548 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=276, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/A/17cfd739baf74afe8c0f230b5faf3b50 2024-11-23T15:26:11,555 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/B/9c83c67e909943278341e1e0aba4cae8 is 50, key is test_row_0/B:col10/1732375569360/Put/seqid=0 2024-11-23T15:26:11,558 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742206_1382 (size=12301) 2024-11-23T15:26:11,558 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:11,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 257 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51386 deadline: 1732375631554, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:11,661 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:11,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 259 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51386 deadline: 1732375631659, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:11,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-11-23T15:26:11,867 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:11,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 261 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51386 deadline: 1732375631863, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:11,959 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=276 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/B/9c83c67e909943278341e1e0aba4cae8 2024-11-23T15:26:11,965 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/C/2d5cc8d614be46bf8ce70f545da622fa is 50, key is test_row_0/C:col10/1732375569360/Put/seqid=0 2024-11-23T15:26:11,969 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742207_1383 (size=12301) 2024-11-23T15:26:12,064 DEBUG [Thread-1417 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5aee939b to 127.0.0.1:62881 2024-11-23T15:26:12,065 DEBUG [Thread-1417 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T15:26:12,065 DEBUG [Thread-1423 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x75e4d3d0 to 127.0.0.1:62881 2024-11-23T15:26:12,065 DEBUG [Thread-1423 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T15:26:12,066 DEBUG [Thread-1419 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1f49665c to 127.0.0.1:62881 2024-11-23T15:26:12,066 DEBUG [Thread-1419 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T15:26:12,069 DEBUG [Thread-1425 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2b308f62 to 127.0.0.1:62881 2024-11-23T15:26:12,069 DEBUG [Thread-1425 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T15:26:12,069 DEBUG [Thread-1421 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x683f8469 to 127.0.0.1:62881 2024-11-23T15:26:12,069 DEBUG [Thread-1421 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T15:26:12,171 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:12,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 263 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51386 deadline: 1732375632171, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:12,370 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=276 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/C/2d5cc8d614be46bf8ce70f545da622fa 2024-11-23T15:26:12,374 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/A/17cfd739baf74afe8c0f230b5faf3b50 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/A/17cfd739baf74afe8c0f230b5faf3b50 2024-11-23T15:26:12,377 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/A/17cfd739baf74afe8c0f230b5faf3b50, entries=150, sequenceid=276, filesize=30.5 K 2024-11-23T15:26:12,377 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/B/9c83c67e909943278341e1e0aba4cae8 as 
hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/B/9c83c67e909943278341e1e0aba4cae8 2024-11-23T15:26:12,380 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/B/9c83c67e909943278341e1e0aba4cae8, entries=150, sequenceid=276, filesize=12.0 K 2024-11-23T15:26:12,381 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/C/2d5cc8d614be46bf8ce70f545da622fa as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/C/2d5cc8d614be46bf8ce70f545da622fa 2024-11-23T15:26:12,383 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/C/2d5cc8d614be46bf8ce70f545da622fa, entries=150, sequenceid=276, filesize=12.0 K 2024-11-23T15:26:12,384 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for 1b51fcb9c5cc43364334a31573ca489e in 1660ms, sequenceid=276, compaction requested=false 2024-11-23T15:26:12,384 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(2538): Flush status journal for 1b51fcb9c5cc43364334a31573ca489e: 2024-11-23T15:26:12,384 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e. 
2024-11-23T15:26:12,384 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=117 2024-11-23T15:26:12,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4106): Remote procedure done, pid=117 2024-11-23T15:26:12,386 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=117, resume processing ppid=116 2024-11-23T15:26:12,386 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=117, ppid=116, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.8130 sec 2024-11-23T15:26:12,387 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=116, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=116, table=TestAcidGuarantees in 1.8200 sec 2024-11-23T15:26:12,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-11-23T15:26:12,676 INFO [Thread-1416 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 116 completed 2024-11-23T15:26:12,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(8581): Flush requested on 1b51fcb9c5cc43364334a31573ca489e 2024-11-23T15:26:12,677 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 1b51fcb9c5cc43364334a31573ca489e 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-23T15:26:12,677 DEBUG [Thread-1406 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1b82ba2a to 127.0.0.1:62881 2024-11-23T15:26:12,677 DEBUG [Thread-1406 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T15:26:12,677 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1b51fcb9c5cc43364334a31573ca489e, store=A 2024-11-23T15:26:12,678 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:26:12,678 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1b51fcb9c5cc43364334a31573ca489e, store=B 2024-11-23T15:26:12,678 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:26:12,678 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1b51fcb9c5cc43364334a31573ca489e, store=C 2024-11-23T15:26:12,678 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:26:12,683 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241123dd5623bc35714eb5a0d31bb281da59f5_1b51fcb9c5cc43364334a31573ca489e is 50, key is test_row_0/A:col10/1732375572676/Put/seqid=0 2024-11-23T15:26:12,689 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742208_1384 (size=12454) 2024-11-23T15:26:13,025 DEBUG [Thread-1408 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7b6cf8cb to 127.0.0.1:62881 2024-11-23T15:26:13,025 DEBUG [Thread-1408 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T15:26:13,040 DEBUG [Thread-1410 {}] 
zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7ec15031 to 127.0.0.1:62881 2024-11-23T15:26:13,040 DEBUG [Thread-1410 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T15:26:13,073 DEBUG [Thread-1414 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3c336ea4 to 127.0.0.1:62881 2024-11-23T15:26:13,073 DEBUG [Thread-1414 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T15:26:13,081 DEBUG [Thread-1412 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3dd5b441 to 127.0.0.1:62881 2024-11-23T15:26:13,081 DEBUG [Thread-1412 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T15:26:13,082 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. Writers: 2024-11-23T15:26:13,082 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 164 2024-11-23T15:26:13,082 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 22 2024-11-23T15:26:13,082 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 12 2024-11-23T15:26:13,082 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 8 2024-11-23T15:26:13,082 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 23 2024-11-23T15:26:13,082 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-11-23T15:26:13,082 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-11-23T15:26:13,082 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2794 2024-11-23T15:26:13,082 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 8382 rows 2024-11-23T15:26:13,082 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2802 2024-11-23T15:26:13,082 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 8406 rows 2024-11-23T15:26:13,082 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2793 2024-11-23T15:26:13,082 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 8379 rows 2024-11-23T15:26:13,082 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2776 2024-11-23T15:26:13,082 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 8328 rows 2024-11-23T15:26:13,082 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2803 2024-11-23T15:26:13,082 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 8409 rows 2024-11-23T15:26:13,082 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-11-23T15:26:13,082 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2b976e1a to 127.0.0.1:62881 2024-11-23T15:26:13,082 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T15:26:13,084 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-11-23T15:26:13,084 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-11-23T15:26:13,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] procedure2.ProcedureExecutor(1098): Stored pid=118, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-11-23T15:26:13,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-11-23T15:26:13,088 DEBUG 
[PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732375573088"}]},"ts":"1732375573088"} 2024-11-23T15:26:13,089 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-11-23T15:26:13,090 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:13,091 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-11-23T15:26:13,092 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=119, ppid=118, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-23T15:26:13,093 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=120, ppid=119, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=1b51fcb9c5cc43364334a31573ca489e, UNASSIGN}] 2024-11-23T15:26:13,094 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241123dd5623bc35714eb5a0d31bb281da59f5_1b51fcb9c5cc43364334a31573ca489e to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123dd5623bc35714eb5a0d31bb281da59f5_1b51fcb9c5cc43364334a31573ca489e 2024-11-23T15:26:13,094 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=120, ppid=119, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=1b51fcb9c5cc43364334a31573ca489e, UNASSIGN 2024-11-23T15:26:13,094 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/A/d3c6671b6ca9416faebc411e889d708a, store: [table=TestAcidGuarantees family=A region=1b51fcb9c5cc43364334a31573ca489e] 2024-11-23T15:26:13,094 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=120 updating hbase:meta row=1b51fcb9c5cc43364334a31573ca489e, regionState=CLOSING, regionLocation=6a36843bf905,33811,1732375456985 2024-11-23T15:26:13,095 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/A/d3c6671b6ca9416faebc411e889d708a is 175, key is test_row_0/A:col10/1732375572676/Put/seqid=0 2024-11-23T15:26:13,095 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-23T15:26:13,095 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=121, ppid=120, state=RUNNABLE; CloseRegionProcedure 1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985}] 2024-11-23T15:26:13,098 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to 
blk_1073742209_1385 (size=31255) 2024-11-23T15:26:13,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-11-23T15:26:13,247 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:26:13,247 INFO [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=121}] handler.UnassignRegionHandler(124): Close 1b51fcb9c5cc43364334a31573ca489e 2024-11-23T15:26:13,247 DEBUG [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=121}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-23T15:26:13,247 DEBUG [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=121}] regionserver.HRegion(1681): Closing 1b51fcb9c5cc43364334a31573ca489e, disabling compactions & flushes 2024-11-23T15:26:13,247 DEBUG [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=121}] regionserver.HRegion(1942): waiting for 0 compactions & cache flush to complete for region TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e. 2024-11-23T15:26:13,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-11-23T15:26:13,499 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=289, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/A/d3c6671b6ca9416faebc411e889d708a 2024-11-23T15:26:13,505 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/B/b33a52401f0c47d6b2381b551756db67 is 50, key is test_row_0/B:col10/1732375572676/Put/seqid=0 2024-11-23T15:26:13,508 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742210_1386 (size=12301) 2024-11-23T15:26:13,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-11-23T15:26:13,908 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=289 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/B/b33a52401f0c47d6b2381b551756db67 2024-11-23T15:26:13,914 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/C/9bb26376ebe54b53bbfb67e689e005b7 is 50, key is test_row_0/C:col10/1732375572676/Put/seqid=0 2024-11-23T15:26:13,918 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742211_1387 (size=12301) 2024-11-23T15:26:14,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 
2024-11-23T15:26:14,318 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=289 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/C/9bb26376ebe54b53bbfb67e689e005b7 2024-11-23T15:26:14,322 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/A/d3c6671b6ca9416faebc411e889d708a as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/A/d3c6671b6ca9416faebc411e889d708a 2024-11-23T15:26:14,325 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/A/d3c6671b6ca9416faebc411e889d708a, entries=150, sequenceid=289, filesize=30.5 K 2024-11-23T15:26:14,326 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/B/b33a52401f0c47d6b2381b551756db67 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/B/b33a52401f0c47d6b2381b551756db67 2024-11-23T15:26:14,329 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/B/b33a52401f0c47d6b2381b551756db67, entries=150, sequenceid=289, filesize=12.0 K 2024-11-23T15:26:14,329 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/C/9bb26376ebe54b53bbfb67e689e005b7 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/C/9bb26376ebe54b53bbfb67e689e005b7 2024-11-23T15:26:14,332 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/C/9bb26376ebe54b53bbfb67e689e005b7, entries=150, sequenceid=289, filesize=12.0 K 2024-11-23T15:26:14,333 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=26.84 KB/27480 for 1b51fcb9c5cc43364334a31573ca489e in 1656ms, sequenceid=289, compaction requested=true 2024-11-23T15:26:14,333 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 1b51fcb9c5cc43364334a31573ca489e: 2024-11-23T15:26:14,333 INFO [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=121}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e. 
2024-11-23T15:26:14,333 DEBUG [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=121}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e. 2024-11-23T15:26:14,333 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1b51fcb9c5cc43364334a31573ca489e:A, priority=-2147483648, current under compaction store size is 1 2024-11-23T15:26:14,333 DEBUG [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=121}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e. after waiting 0 ms 2024-11-23T15:26:14,333 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T15:26:14,333 DEBUG [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=121}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e. 2024-11-23T15:26:14,333 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1b51fcb9c5cc43364334a31573ca489e:B, priority=-2147483648, current under compaction store size is 2 2024-11-23T15:26:14,333 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e. because compaction request was cancelled 2024-11-23T15:26:14,333 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e. because compaction request was cancelled 2024-11-23T15:26:14,333 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T15:26:14,333 INFO [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=121}] regionserver.HRegion(2837): Flushing 1b51fcb9c5cc43364334a31573ca489e 3/3 column families, dataSize=26.84 KB heapSize=71.06 KB 2024-11-23T15:26:14,333 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1b51fcb9c5cc43364334a31573ca489e:B 2024-11-23T15:26:14,333 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1b51fcb9c5cc43364334a31573ca489e:A 2024-11-23T15:26:14,333 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1b51fcb9c5cc43364334a31573ca489e:C, priority=-2147483648, current under compaction store size is 3 2024-11-23T15:26:14,333 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e. 
because compaction request was cancelled 2024-11-23T15:26:14,333 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T15:26:14,333 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1b51fcb9c5cc43364334a31573ca489e:C 2024-11-23T15:26:14,333 DEBUG [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=121}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1b51fcb9c5cc43364334a31573ca489e, store=A 2024-11-23T15:26:14,333 DEBUG [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=121}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:26:14,333 DEBUG [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=121}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1b51fcb9c5cc43364334a31573ca489e, store=B 2024-11-23T15:26:14,333 DEBUG [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=121}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:26:14,333 DEBUG [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=121}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 1b51fcb9c5cc43364334a31573ca489e, store=C 2024-11-23T15:26:14,333 DEBUG [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=121}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:26:14,338 DEBUG [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=121}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241123f4e31d5063374d12a69cf829636c4ea9_1b51fcb9c5cc43364334a31573ca489e is 50, key is test_row_0/A:col10/1732375573072/Put/seqid=0 2024-11-23T15:26:14,341 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742212_1388 (size=9914) 2024-11-23T15:26:14,741 DEBUG [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=121}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:14,744 INFO [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=121}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241123f4e31d5063374d12a69cf829636c4ea9_1b51fcb9c5cc43364334a31573ca489e to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123f4e31d5063374d12a69cf829636c4ea9_1b51fcb9c5cc43364334a31573ca489e 2024-11-23T15:26:14,745 DEBUG [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=121}] mob.DefaultMobStoreFlusher(263): Flush store file: 
hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/A/97aa4a84c328430a83d4f60aa96f8c07, store: [table=TestAcidGuarantees family=A region=1b51fcb9c5cc43364334a31573ca489e] 2024-11-23T15:26:14,746 DEBUG [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=121}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/A/97aa4a84c328430a83d4f60aa96f8c07 is 175, key is test_row_0/A:col10/1732375573072/Put/seqid=0 2024-11-23T15:26:14,749 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742213_1389 (size=22561) 2024-11-23T15:26:15,150 INFO [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=121}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=296, memsize=8.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/A/97aa4a84c328430a83d4f60aa96f8c07 2024-11-23T15:26:15,155 DEBUG [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=121}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/B/3f712b675029401db552014923ed09e3 is 50, key is test_row_0/B:col10/1732375573072/Put/seqid=0 2024-11-23T15:26:15,158 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742214_1390 (size=9857) 2024-11-23T15:26:15,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-11-23T15:26:15,402 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-11-23T15:26:15,559 INFO [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=121}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=296 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/B/3f712b675029401db552014923ed09e3 2024-11-23T15:26:15,564 DEBUG [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=121}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/C/1c071a3eaaf74452b96438e1c48e611d is 50, key is test_row_0/C:col10/1732375573072/Put/seqid=0 2024-11-23T15:26:15,567 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742215_1391 (size=9857) 2024-11-23T15:26:15,968 INFO [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=121}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=296 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/C/1c071a3eaaf74452b96438e1c48e611d 2024-11-23T15:26:15,971 DEBUG [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=121}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/A/97aa4a84c328430a83d4f60aa96f8c07 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/A/97aa4a84c328430a83d4f60aa96f8c07 2024-11-23T15:26:15,974 INFO [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=121}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/A/97aa4a84c328430a83d4f60aa96f8c07, entries=100, sequenceid=296, filesize=22.0 K 2024-11-23T15:26:15,975 DEBUG [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=121}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/B/3f712b675029401db552014923ed09e3 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/B/3f712b675029401db552014923ed09e3 2024-11-23T15:26:15,978 INFO [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=121}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/B/3f712b675029401db552014923ed09e3, entries=100, sequenceid=296, filesize=9.6 K 2024-11-23T15:26:15,978 DEBUG [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=121}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/.tmp/C/1c071a3eaaf74452b96438e1c48e611d as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/C/1c071a3eaaf74452b96438e1c48e611d 2024-11-23T15:26:15,981 INFO [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=121}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/C/1c071a3eaaf74452b96438e1c48e611d, entries=100, sequenceid=296, filesize=9.6 K 2024-11-23T15:26:15,982 INFO [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=121}] regionserver.HRegion(3040): Finished flush of dataSize ~26.84 KB/27480, heapSize ~71.02 KB/72720, currentSize=0 B/0 for 1b51fcb9c5cc43364334a31573ca489e in 1649ms, sequenceid=296, compaction requested=true 2024-11-23T15:26:15,982 DEBUG [StoreCloser-TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/A/350d3a12f9664df8b98aadd325d08058, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/A/478d8eb427a543a4aa2209198924f38a, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/A/c914442502fe43d79f90a79dd2c9daea, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/A/20973a57493249a193e17fa523f249e9, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/A/a6003b208ab94f5c82af3cc87eee28d4, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/A/eebeaf28e8e74499a98a3d64cb0e91a5, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/A/4a2b4e8719584746a4edbb4a03beef8e, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/A/218ae65b56684266b3f4aea4121ac3d8, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/A/314c64c0543940b1bbd462f14f8fc6ad, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/A/9769b8152127404cbec2c731d806e885, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/A/16ee2c4e0b15424191e94197ba61affd, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/A/5687745c558b4133b8e5db9b95727dce, 
hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/A/c5800bb88a3b411fb4b8d7e36516d41e, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/A/eb08d221c4834885a59bb705046db278, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/A/71829370bbdf4f0d803db9e1f68db00c, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/A/8010aebc23a248419b85fea04865bb60, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/A/d58e05eeb86148649d9528dc1c797b83] to archive 2024-11-23T15:26:15,983 DEBUG [StoreCloser-TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-23T15:26:15,984 DEBUG [StoreCloser-TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/A/350d3a12f9664df8b98aadd325d08058 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/A/350d3a12f9664df8b98aadd325d08058 2024-11-23T15:26:15,985 DEBUG [StoreCloser-TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/A/478d8eb427a543a4aa2209198924f38a to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/A/478d8eb427a543a4aa2209198924f38a 2024-11-23T15:26:15,986 DEBUG [StoreCloser-TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/A/c914442502fe43d79f90a79dd2c9daea to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/A/c914442502fe43d79f90a79dd2c9daea 2024-11-23T15:26:15,987 DEBUG [StoreCloser-TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/A/20973a57493249a193e17fa523f249e9 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/A/20973a57493249a193e17fa523f249e9 2024-11-23T15:26:15,987 DEBUG [StoreCloser-TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/A/a6003b208ab94f5c82af3cc87eee28d4 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/A/a6003b208ab94f5c82af3cc87eee28d4 2024-11-23T15:26:15,988 DEBUG [StoreCloser-TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/A/eebeaf28e8e74499a98a3d64cb0e91a5 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/A/eebeaf28e8e74499a98a3d64cb0e91a5 2024-11-23T15:26:15,989 DEBUG [StoreCloser-TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/A/4a2b4e8719584746a4edbb4a03beef8e to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/A/4a2b4e8719584746a4edbb4a03beef8e 2024-11-23T15:26:15,989 DEBUG [StoreCloser-TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/A/218ae65b56684266b3f4aea4121ac3d8 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/A/218ae65b56684266b3f4aea4121ac3d8 2024-11-23T15:26:15,990 DEBUG [StoreCloser-TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/A/314c64c0543940b1bbd462f14f8fc6ad to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/A/314c64c0543940b1bbd462f14f8fc6ad 2024-11-23T15:26:15,991 DEBUG [StoreCloser-TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/A/9769b8152127404cbec2c731d806e885 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/A/9769b8152127404cbec2c731d806e885 2024-11-23T15:26:15,991 DEBUG [StoreCloser-TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/A/16ee2c4e0b15424191e94197ba61affd to 
hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/A/16ee2c4e0b15424191e94197ba61affd 2024-11-23T15:26:15,992 DEBUG [StoreCloser-TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/A/5687745c558b4133b8e5db9b95727dce to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/A/5687745c558b4133b8e5db9b95727dce 2024-11-23T15:26:15,993 DEBUG [StoreCloser-TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/A/c5800bb88a3b411fb4b8d7e36516d41e to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/A/c5800bb88a3b411fb4b8d7e36516d41e 2024-11-23T15:26:15,994 DEBUG [StoreCloser-TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/A/eb08d221c4834885a59bb705046db278 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/A/eb08d221c4834885a59bb705046db278 2024-11-23T15:26:15,995 DEBUG [StoreCloser-TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/A/71829370bbdf4f0d803db9e1f68db00c to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/A/71829370bbdf4f0d803db9e1f68db00c 2024-11-23T15:26:15,995 DEBUG [StoreCloser-TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/A/8010aebc23a248419b85fea04865bb60 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/A/8010aebc23a248419b85fea04865bb60 2024-11-23T15:26:15,996 DEBUG [StoreCloser-TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/A/d58e05eeb86148649d9528dc1c797b83 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/A/d58e05eeb86148649d9528dc1c797b83 2024-11-23T15:26:15,997 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/B/7c3c3bc78bbb4a9c9403df36fb61bcc1, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/B/a52c1784f752479db240332a636cc0e6, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/B/40e3b21e7f534a5db6e6272a1cde58d5, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/B/c8a3f7dc9b9243f6ae549962e43f4870, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/B/f4367b1ed36e4fd3828e2b25d90184e4, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/B/0d998298572c420d91d7d8e05e2eb070, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/B/4305f6d5ed704e0c8061947a1ba3a6ef, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/B/39d72d3633ea4511bda6d019118fcbee, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/B/621349a3dcb74aae9f9cbd14953ea086, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/B/a2cedf4f5305426fbfa57764efd7515c, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/B/953a3fa99bcd4d9e9800d5cdfdb434e9, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/B/37deadbdc3404824a7dc3d3232bfa2ba, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/B/b9acb12d332645c981b15df86458afd9, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/B/e1dde2623e1948af8e77a8145a855e3e, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/B/ec644308aec04c958fe87304bce98fa0, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/B/df604f9aeaeb4a3e906d5226b4fb5c7a, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/B/2ed2c3bb62054f6d96ff65cd366d2f02] to archive 2024-11-23T15:26:15,998 DEBUG [StoreCloser-TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-23T15:26:15,999 DEBUG [StoreCloser-TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/B/7c3c3bc78bbb4a9c9403df36fb61bcc1 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/B/7c3c3bc78bbb4a9c9403df36fb61bcc1 2024-11-23T15:26:16,000 DEBUG [StoreCloser-TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/B/a52c1784f752479db240332a636cc0e6 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/B/a52c1784f752479db240332a636cc0e6 2024-11-23T15:26:16,001 DEBUG [StoreCloser-TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/B/40e3b21e7f534a5db6e6272a1cde58d5 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/B/40e3b21e7f534a5db6e6272a1cde58d5 2024-11-23T15:26:16,001 DEBUG [StoreCloser-TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/B/c8a3f7dc9b9243f6ae549962e43f4870 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/B/c8a3f7dc9b9243f6ae549962e43f4870 2024-11-23T15:26:16,002 DEBUG [StoreCloser-TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/B/f4367b1ed36e4fd3828e2b25d90184e4 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/B/f4367b1ed36e4fd3828e2b25d90184e4 2024-11-23T15:26:16,003 DEBUG [StoreCloser-TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/B/0d998298572c420d91d7d8e05e2eb070 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/B/0d998298572c420d91d7d8e05e2eb070 2024-11-23T15:26:16,004 DEBUG [StoreCloser-TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/B/4305f6d5ed704e0c8061947a1ba3a6ef to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/B/4305f6d5ed704e0c8061947a1ba3a6ef 2024-11-23T15:26:16,004 DEBUG [StoreCloser-TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/B/39d72d3633ea4511bda6d019118fcbee to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/B/39d72d3633ea4511bda6d019118fcbee 2024-11-23T15:26:16,005 DEBUG [StoreCloser-TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/B/621349a3dcb74aae9f9cbd14953ea086 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/B/621349a3dcb74aae9f9cbd14953ea086 2024-11-23T15:26:16,006 DEBUG [StoreCloser-TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/B/a2cedf4f5305426fbfa57764efd7515c to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/B/a2cedf4f5305426fbfa57764efd7515c 2024-11-23T15:26:16,007 DEBUG [StoreCloser-TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/B/953a3fa99bcd4d9e9800d5cdfdb434e9 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/B/953a3fa99bcd4d9e9800d5cdfdb434e9 2024-11-23T15:26:16,007 DEBUG [StoreCloser-TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/B/37deadbdc3404824a7dc3d3232bfa2ba to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/B/37deadbdc3404824a7dc3d3232bfa2ba 2024-11-23T15:26:16,008 DEBUG [StoreCloser-TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/B/b9acb12d332645c981b15df86458afd9 to 
hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/B/b9acb12d332645c981b15df86458afd9 2024-11-23T15:26:16,009 DEBUG [StoreCloser-TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/B/e1dde2623e1948af8e77a8145a855e3e to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/B/e1dde2623e1948af8e77a8145a855e3e 2024-11-23T15:26:16,010 DEBUG [StoreCloser-TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/B/ec644308aec04c958fe87304bce98fa0 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/B/ec644308aec04c958fe87304bce98fa0 2024-11-23T15:26:16,010 DEBUG [StoreCloser-TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/B/df604f9aeaeb4a3e906d5226b4fb5c7a to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/B/df604f9aeaeb4a3e906d5226b4fb5c7a 2024-11-23T15:26:16,011 DEBUG [StoreCloser-TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/B/2ed2c3bb62054f6d96ff65cd366d2f02 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/B/2ed2c3bb62054f6d96ff65cd366d2f02 2024-11-23T15:26:16,012 DEBUG [StoreCloser-TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/C/79cc7be3c63c4d349530918c261ddf95, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/C/6359688c365344f784cba62571d73c03, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/C/d4440cad78604a5d83a1630917b68d4b, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/C/7db99c6601f0484f8be3f800761f92e3, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/C/00f10cb888a54e20a6ed651c29edf91d, 
hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/C/35c84c22947b42bbb3ec8dd3ed41bda3, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/C/44899797c75748c8a3d0dc8f3ae8d4c6, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/C/a41a00df0a8d456695fb2fd167d4d9cc, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/C/644e4db7ec9c49548e642024e600fb3f, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/C/5daccf0f05cd4877a9170df96a585b50, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/C/afc5f827da4b4cc4b8a05106ea5ba3ca, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/C/0d2e4486f34543a8a5bfb15d2826685f, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/C/6f94ffeeb7fb448180c197757a170a57, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/C/3a90b096c3724c81b4445f43044b050c, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/C/daea28aece32411ab826bd889b346eb9, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/C/0b07adbe2baa499189da1f3fc1b75e4e, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/C/3d694fdcfe5f405d88b7bbbc402c71e0] to archive 2024-11-23T15:26:16,013 DEBUG [StoreCloser-TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-23T15:26:16,013 DEBUG [StoreCloser-TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/C/79cc7be3c63c4d349530918c261ddf95 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/C/79cc7be3c63c4d349530918c261ddf95 2024-11-23T15:26:16,014 DEBUG [StoreCloser-TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/C/6359688c365344f784cba62571d73c03 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/C/6359688c365344f784cba62571d73c03 2024-11-23T15:26:16,015 DEBUG [StoreCloser-TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/C/d4440cad78604a5d83a1630917b68d4b to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/C/d4440cad78604a5d83a1630917b68d4b 2024-11-23T15:26:16,016 DEBUG [StoreCloser-TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/C/7db99c6601f0484f8be3f800761f92e3 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/C/7db99c6601f0484f8be3f800761f92e3 2024-11-23T15:26:16,016 DEBUG [StoreCloser-TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/C/00f10cb888a54e20a6ed651c29edf91d to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/C/00f10cb888a54e20a6ed651c29edf91d 2024-11-23T15:26:16,017 DEBUG [StoreCloser-TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/C/35c84c22947b42bbb3ec8dd3ed41bda3 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/C/35c84c22947b42bbb3ec8dd3ed41bda3 2024-11-23T15:26:16,018 DEBUG [StoreCloser-TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/C/44899797c75748c8a3d0dc8f3ae8d4c6 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/C/44899797c75748c8a3d0dc8f3ae8d4c6 2024-11-23T15:26:16,018 DEBUG [StoreCloser-TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/C/a41a00df0a8d456695fb2fd167d4d9cc to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/C/a41a00df0a8d456695fb2fd167d4d9cc 2024-11-23T15:26:16,019 DEBUG [StoreCloser-TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/C/644e4db7ec9c49548e642024e600fb3f to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/C/644e4db7ec9c49548e642024e600fb3f 2024-11-23T15:26:16,020 DEBUG [StoreCloser-TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/C/5daccf0f05cd4877a9170df96a585b50 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/C/5daccf0f05cd4877a9170df96a585b50 2024-11-23T15:26:16,020 DEBUG [StoreCloser-TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/C/afc5f827da4b4cc4b8a05106ea5ba3ca to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/C/afc5f827da4b4cc4b8a05106ea5ba3ca 2024-11-23T15:26:16,021 DEBUG [StoreCloser-TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/C/0d2e4486f34543a8a5bfb15d2826685f to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/C/0d2e4486f34543a8a5bfb15d2826685f 2024-11-23T15:26:16,022 DEBUG [StoreCloser-TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/C/6f94ffeeb7fb448180c197757a170a57 to 
hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/C/6f94ffeeb7fb448180c197757a170a57 2024-11-23T15:26:16,023 DEBUG [StoreCloser-TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/C/3a90b096c3724c81b4445f43044b050c to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/C/3a90b096c3724c81b4445f43044b050c 2024-11-23T15:26:16,024 DEBUG [StoreCloser-TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/C/daea28aece32411ab826bd889b346eb9 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/C/daea28aece32411ab826bd889b346eb9 2024-11-23T15:26:16,024 DEBUG [StoreCloser-TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/C/0b07adbe2baa499189da1f3fc1b75e4e to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/C/0b07adbe2baa499189da1f3fc1b75e4e 2024-11-23T15:26:16,025 DEBUG [StoreCloser-TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/C/3d694fdcfe5f405d88b7bbbc402c71e0 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/C/3d694fdcfe5f405d88b7bbbc402c71e0 2024-11-23T15:26:16,028 DEBUG [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=121}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/recovered.edits/299.seqid, newMaxSeqId=299, maxSeqId=4 2024-11-23T15:26:16,029 INFO [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=121}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e. 
2024-11-23T15:26:16,029 DEBUG [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=121}] regionserver.HRegion(1635): Region close journal for 1b51fcb9c5cc43364334a31573ca489e: 2024-11-23T15:26:16,030 INFO [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=121}] handler.UnassignRegionHandler(170): Closed 1b51fcb9c5cc43364334a31573ca489e 2024-11-23T15:26:16,030 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=120 updating hbase:meta row=1b51fcb9c5cc43364334a31573ca489e, regionState=CLOSED 2024-11-23T15:26:16,032 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=121, resume processing ppid=120 2024-11-23T15:26:16,032 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=121, ppid=120, state=SUCCESS; CloseRegionProcedure 1b51fcb9c5cc43364334a31573ca489e, server=6a36843bf905,33811,1732375456985 in 2.9360 sec 2024-11-23T15:26:16,033 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=120, resume processing ppid=119 2024-11-23T15:26:16,033 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=120, ppid=119, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=1b51fcb9c5cc43364334a31573ca489e, UNASSIGN in 2.9390 sec 2024-11-23T15:26:16,034 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=119, resume processing ppid=118 2024-11-23T15:26:16,034 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=119, ppid=118, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 2.9420 sec 2024-11-23T15:26:16,035 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732375576035"}]},"ts":"1732375576035"} 2024-11-23T15:26:16,036 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-11-23T15:26:16,038 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-11-23T15:26:16,039 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=118, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 2.9540 sec 2024-11-23T15:26:16,751 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees 2024-11-23T15:26:17,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-11-23T15:26:17,192 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 118 completed 2024-11-23T15:26:17,192 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-11-23T15:26:17,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] procedure2.ProcedureExecutor(1098): Stored pid=122, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-11-23T15:26:17,194 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=122, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-23T15:26:17,194 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-11-23T15:26:17,194 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=122, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-23T15:26:17,196 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e 2024-11-23T15:26:17,197 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/A, FileablePath, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/B, FileablePath, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/C, FileablePath, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/recovered.edits] 2024-11-23T15:26:17,199 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/A/17cfd739baf74afe8c0f230b5faf3b50 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/A/17cfd739baf74afe8c0f230b5faf3b50 2024-11-23T15:26:17,200 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/A/97aa4a84c328430a83d4f60aa96f8c07 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/A/97aa4a84c328430a83d4f60aa96f8c07 2024-11-23T15:26:17,200 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/A/c0ebf57ede8c49e4b2733d76e2cc00e6 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/A/c0ebf57ede8c49e4b2733d76e2cc00e6 2024-11-23T15:26:17,201 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/A/d3c6671b6ca9416faebc411e889d708a to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/A/d3c6671b6ca9416faebc411e889d708a 2024-11-23T15:26:17,203 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/B/3f712b675029401db552014923ed09e3 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/B/3f712b675029401db552014923ed09e3 2024-11-23T15:26:17,203 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/B/9c83c67e909943278341e1e0aba4cae8 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/B/9c83c67e909943278341e1e0aba4cae8 2024-11-23T15:26:17,204 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/B/b33a52401f0c47d6b2381b551756db67 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/B/b33a52401f0c47d6b2381b551756db67 2024-11-23T15:26:17,205 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/B/fb58ec12f70a48f485fcc8feeffda6c4 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/B/fb58ec12f70a48f485fcc8feeffda6c4 2024-11-23T15:26:17,206 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/C/1c071a3eaaf74452b96438e1c48e611d to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/C/1c071a3eaaf74452b96438e1c48e611d 2024-11-23T15:26:17,207 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/C/232920696c0248118edaeab0b6253662 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/C/232920696c0248118edaeab0b6253662 2024-11-23T15:26:17,207 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/C/2d5cc8d614be46bf8ce70f545da622fa to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/C/2d5cc8d614be46bf8ce70f545da622fa 2024-11-23T15:26:17,208 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/C/9bb26376ebe54b53bbfb67e689e005b7 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/C/9bb26376ebe54b53bbfb67e689e005b7 2024-11-23T15:26:17,210 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/recovered.edits/299.seqid to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e/recovered.edits/299.seqid 2024-11-23T15:26:17,210 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/1b51fcb9c5cc43364334a31573ca489e 2024-11-23T15:26:17,210 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-11-23T15:26:17,211 DEBUG [PEWorker-4 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-11-23T15:26:17,211 DEBUG [PEWorker-4 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A] 2024-11-23T15:26:17,213 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112308214c0e253d44239b787441be22f446_1b51fcb9c5cc43364334a31573ca489e to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112308214c0e253d44239b787441be22f446_1b51fcb9c5cc43364334a31573ca489e 2024-11-23T15:26:17,214 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411230ac289dcc12f417990c0ac65309c34a4_1b51fcb9c5cc43364334a31573ca489e to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411230ac289dcc12f417990c0ac65309c34a4_1b51fcb9c5cc43364334a31573ca489e 2024-11-23T15:26:17,215 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411232b9c2f5d071d420c8eb892d674e0d95d_1b51fcb9c5cc43364334a31573ca489e to 
hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411232b9c2f5d071d420c8eb892d674e0d95d_1b51fcb9c5cc43364334a31573ca489e 2024-11-23T15:26:17,215 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411233f1893c25979425bbb9eb1bcec87cf52_1b51fcb9c5cc43364334a31573ca489e to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411233f1893c25979425bbb9eb1bcec87cf52_1b51fcb9c5cc43364334a31573ca489e 2024-11-23T15:26:17,216 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112353a83cf0c32746729e4497a6f28224de_1b51fcb9c5cc43364334a31573ca489e to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112353a83cf0c32746729e4497a6f28224de_1b51fcb9c5cc43364334a31573ca489e 2024-11-23T15:26:17,217 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411235d440d7cb78a495f80092adff0351df7_1b51fcb9c5cc43364334a31573ca489e to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411235d440d7cb78a495f80092adff0351df7_1b51fcb9c5cc43364334a31573ca489e 2024-11-23T15:26:17,218 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411236a5e2dcfe0c5402e958a2ca81b0211d6_1b51fcb9c5cc43364334a31573ca489e to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411236a5e2dcfe0c5402e958a2ca81b0211d6_1b51fcb9c5cc43364334a31573ca489e 2024-11-23T15:26:17,219 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112386ea5970eb024ce4b80209ae2476cfa0_1b51fcb9c5cc43364334a31573ca489e to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112386ea5970eb024ce4b80209ae2476cfa0_1b51fcb9c5cc43364334a31573ca489e 2024-11-23T15:26:17,219 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112390f89f2e1ece425f865de721874884d6_1b51fcb9c5cc43364334a31573ca489e to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112390f89f2e1ece425f865de721874884d6_1b51fcb9c5cc43364334a31573ca489e 2024-11-23T15:26:17,220 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123a69c46b4e35c4ff29bfa43eee7b7004a_1b51fcb9c5cc43364334a31573ca489e to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123a69c46b4e35c4ff29bfa43eee7b7004a_1b51fcb9c5cc43364334a31573ca489e 2024-11-23T15:26:17,221 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123b24717760159490b9ae81cdad8837cde_1b51fcb9c5cc43364334a31573ca489e to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123b24717760159490b9ae81cdad8837cde_1b51fcb9c5cc43364334a31573ca489e 2024-11-23T15:26:17,222 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123ba36fd8f856d4486a70120f5b75ddd30_1b51fcb9c5cc43364334a31573ca489e to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123ba36fd8f856d4486a70120f5b75ddd30_1b51fcb9c5cc43364334a31573ca489e 2024-11-23T15:26:17,222 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123d06a82892c574f9b88bdc6973eaa900e_1b51fcb9c5cc43364334a31573ca489e to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123d06a82892c574f9b88bdc6973eaa900e_1b51fcb9c5cc43364334a31573ca489e 2024-11-23T15:26:17,223 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123dd5623bc35714eb5a0d31bb281da59f5_1b51fcb9c5cc43364334a31573ca489e to 
hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123dd5623bc35714eb5a0d31bb281da59f5_1b51fcb9c5cc43364334a31573ca489e 2024-11-23T15:26:17,224 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123f4e31d5063374d12a69cf829636c4ea9_1b51fcb9c5cc43364334a31573ca489e to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123f4e31d5063374d12a69cf829636c4ea9_1b51fcb9c5cc43364334a31573ca489e 2024-11-23T15:26:17,225 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123f7065cfe1cd24872aeea8f6855e60cde_1b51fcb9c5cc43364334a31573ca489e to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123f7065cfe1cd24872aeea8f6855e60cde_1b51fcb9c5cc43364334a31573ca489e 2024-11-23T15:26:17,225 DEBUG [PEWorker-4 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-11-23T15:26:17,227 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=122, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-23T15:26:17,228 WARN [PEWorker-4 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-11-23T15:26:17,230 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 2024-11-23T15:26:17,231 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=122, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-23T15:26:17,231 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 2024-11-23T15:26:17,231 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732375577231"}]},"ts":"9223372036854775807"} 2024-11-23T15:26:17,233 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-11-23T15:26:17,233 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 1b51fcb9c5cc43364334a31573ca489e, NAME => 'TestAcidGuarantees,,1732375548940.1b51fcb9c5cc43364334a31573ca489e.', STARTKEY => '', ENDKEY => ''}] 2024-11-23T15:26:17,233 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 
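Note that the archiving pass above walks the table's mobdir as well as its ordinary region directories, because the preceding test (testMobScanAtomicity) wrote MOB cells for family A. For orientation only, a small hypothetical sketch of how a column family is declared MOB-enabled with the HBase 2.x client API; the threshold value is illustrative and not taken from this test:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MobFamilyExample {
      public static void main(String[] args) {
        // Cells in a MOB-enabled family that exceed the threshold are written to
        // separate MOB files under the table's mobdir, which is why the delete
        // procedure above archives .../mobdir/... paths in addition to the
        // region directories.
        ColumnFamilyDescriptor mobFamily = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("A"))
            .setMobEnabled(true)
            .setMobThreshold(100L * 1024L)  // illustrative 100 KB threshold
            .setMaxVersions(1)
            .build();
        TableDescriptor desc = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("TestAcidGuarantees"))
            .setColumnFamily(mobFamily)
            .build();
        System.out.println(desc);
      }
    }
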
2024-11-23T15:26:17,233 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732375577233"}]},"ts":"9223372036854775807"} 2024-11-23T15:26:17,234 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-11-23T15:26:17,236 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(133): Finished pid=122, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-23T15:26:17,237 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=122, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 44 msec 2024-11-23T15:26:17,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-11-23T15:26:17,295 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 122 completed 2024-11-23T15:26:17,304 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testMobScanAtomicity Thread=240 (was 238) - Thread LEAK? -, OpenFileDescriptor=461 (was 451) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=283 (was 273) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=3688 (was 3727) 2024-11-23T15:26:17,313 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testScanAtomicity Thread=240, OpenFileDescriptor=461, MaxFileDescriptor=1048576, SystemLoadAverage=283, ProcessCount=11, AvailableMemoryMB=3688 2024-11-23T15:26:17,314 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
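The DISABLE (procId 118) and DELETE (procId 122) operations reported as completed above are driven by ordinary Admin calls on the client side. A rough sketch, assuming a standard client Configuration pointing at this cluster:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DropTableExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        TableName tn = TableName.valueOf("TestAcidGuarantees");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // A table must be disabled before it can be deleted; each call blocks
          // until the corresponding master procedure (DisableTableProcedure /
          // DeleteTableProcedure) finishes, which is what the repeated
          // "Checking to see if procedure is done" polling above reflects.
          if (admin.tableExists(tn)) {
            admin.disableTable(tn);
            admin.deleteTable(tn);
          }
        }
      }
    }
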
2024-11-23T15:26:17,314 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-23T15:26:17,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] procedure2.ProcedureExecutor(1098): Stored pid=123, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-11-23T15:26:17,316 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=123, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-11-23T15:26:17,316 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:17,316 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 123 2024-11-23T15:26:17,317 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=123, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-23T15:26:17,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=123 2024-11-23T15:26:17,321 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742216_1392 (size=963) 2024-11-23T15:26:17,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=123 2024-11-23T15:26:17,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=123 2024-11-23T15:26:17,723 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 858dfe5d7eeba9b673a3436fe5ded0f9, NAME => 'TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => 
'1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704 2024-11-23T15:26:17,728 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742217_1393 (size=53) 2024-11-23T15:26:17,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=123 2024-11-23T15:26:18,128 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T15:26:18,129 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 858dfe5d7eeba9b673a3436fe5ded0f9, disabling compactions & flushes 2024-11-23T15:26:18,129 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9. 2024-11-23T15:26:18,129 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9. 2024-11-23T15:26:18,129 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9. after waiting 0 ms 2024-11-23T15:26:18,129 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9. 2024-11-23T15:26:18,129 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9. 
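The create request above (pid=123) carries a table descriptor that sets the table-level attribute 'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE' and three single-version families A, B and C. A hedged sketch of building an equivalent descriptor with the HBase 2.x client API, leaving the remaining attributes shown above at their defaults:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateAdaptiveTableExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        TableDescriptorBuilder builder = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("TestAcidGuarantees"))
            // Table-level metadata selecting the ADAPTIVE in-memory compaction
            // policy, which the StoreOpener lines further below report as
            // "compactor=ADAPTIVE" for each CompactingMemStore.
            .setValue("hbase.hregion.compacting.memstore.type", "ADAPTIVE");
        for (String family : new String[] {"A", "B", "C"}) {
          builder.setColumnFamily(ColumnFamilyDescriptorBuilder
              .newBuilder(Bytes.toBytes(family))
              .setMaxVersions(1)  // VERSIONS => '1' in the descriptor above
              .build());
        }
        TableDescriptor desc = builder.build();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          admin.createTable(desc);  // triggers the CreateTableProcedure
        }
      }
    }
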
2024-11-23T15:26:18,129 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 858dfe5d7eeba9b673a3436fe5ded0f9: 2024-11-23T15:26:18,130 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=123, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-11-23T15:26:18,130 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1732375578130"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732375578130"}]},"ts":"1732375578130"} 2024-11-23T15:26:18,131 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-11-23T15:26:18,131 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=123, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-23T15:26:18,132 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732375578132"}]},"ts":"1732375578132"} 2024-11-23T15:26:18,132 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-11-23T15:26:18,136 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=124, ppid=123, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=858dfe5d7eeba9b673a3436fe5ded0f9, ASSIGN}] 2024-11-23T15:26:18,137 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=124, ppid=123, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=858dfe5d7eeba9b673a3436fe5ded0f9, ASSIGN 2024-11-23T15:26:18,137 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=124, ppid=123, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=858dfe5d7eeba9b673a3436fe5ded0f9, ASSIGN; state=OFFLINE, location=6a36843bf905,33811,1732375456985; forceNewPlan=false, retain=false 2024-11-23T15:26:18,288 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=124 updating hbase:meta row=858dfe5d7eeba9b673a3436fe5ded0f9, regionState=OPENING, regionLocation=6a36843bf905,33811,1732375456985 2024-11-23T15:26:18,289 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=125, ppid=124, state=RUNNABLE; OpenRegionProcedure 858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985}] 2024-11-23T15:26:18,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=123 2024-11-23T15:26:18,440 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:26:18,443 INFO [RS_OPEN_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_OPEN_REGION, pid=125}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9. 
2024-11-23T15:26:18,443 DEBUG [RS_OPEN_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_OPEN_REGION, pid=125}] regionserver.HRegion(7285): Opening region: {ENCODED => 858dfe5d7eeba9b673a3436fe5ded0f9, NAME => 'TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.', STARTKEY => '', ENDKEY => ''} 2024-11-23T15:26:18,443 DEBUG [RS_OPEN_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_OPEN_REGION, pid=125}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 858dfe5d7eeba9b673a3436fe5ded0f9 2024-11-23T15:26:18,443 DEBUG [RS_OPEN_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_OPEN_REGION, pid=125}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T15:26:18,443 DEBUG [RS_OPEN_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_OPEN_REGION, pid=125}] regionserver.HRegion(7327): checking encryption for 858dfe5d7eeba9b673a3436fe5ded0f9 2024-11-23T15:26:18,443 DEBUG [RS_OPEN_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_OPEN_REGION, pid=125}] regionserver.HRegion(7330): checking classloading for 858dfe5d7eeba9b673a3436fe5ded0f9 2024-11-23T15:26:18,444 INFO [StoreOpener-858dfe5d7eeba9b673a3436fe5ded0f9-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 858dfe5d7eeba9b673a3436fe5ded0f9 2024-11-23T15:26:18,445 INFO [StoreOpener-858dfe5d7eeba9b673a3436fe5ded0f9-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-23T15:26:18,446 INFO [StoreOpener-858dfe5d7eeba9b673a3436fe5ded0f9-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 858dfe5d7eeba9b673a3436fe5ded0f9 columnFamilyName A 2024-11-23T15:26:18,446 DEBUG [StoreOpener-858dfe5d7eeba9b673a3436fe5ded0f9-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:18,446 INFO [StoreOpener-858dfe5d7eeba9b673a3436fe5ded0f9-1 {}] regionserver.HStore(327): Store=858dfe5d7eeba9b673a3436fe5ded0f9/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T15:26:18,446 INFO [StoreOpener-858dfe5d7eeba9b673a3436fe5ded0f9-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 858dfe5d7eeba9b673a3436fe5ded0f9 2024-11-23T15:26:18,447 INFO [StoreOpener-858dfe5d7eeba9b673a3436fe5ded0f9-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-23T15:26:18,447 INFO [StoreOpener-858dfe5d7eeba9b673a3436fe5ded0f9-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 858dfe5d7eeba9b673a3436fe5ded0f9 columnFamilyName B 2024-11-23T15:26:18,447 DEBUG [StoreOpener-858dfe5d7eeba9b673a3436fe5ded0f9-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:18,448 INFO [StoreOpener-858dfe5d7eeba9b673a3436fe5ded0f9-1 {}] regionserver.HStore(327): Store=858dfe5d7eeba9b673a3436fe5ded0f9/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T15:26:18,448 INFO [StoreOpener-858dfe5d7eeba9b673a3436fe5ded0f9-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 858dfe5d7eeba9b673a3436fe5ded0f9 2024-11-23T15:26:18,448 INFO [StoreOpener-858dfe5d7eeba9b673a3436fe5ded0f9-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-23T15:26:18,449 INFO [StoreOpener-858dfe5d7eeba9b673a3436fe5ded0f9-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 858dfe5d7eeba9b673a3436fe5ded0f9 columnFamilyName C 2024-11-23T15:26:18,449 DEBUG [StoreOpener-858dfe5d7eeba9b673a3436fe5ded0f9-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:18,449 INFO [StoreOpener-858dfe5d7eeba9b673a3436fe5ded0f9-1 {}] regionserver.HStore(327): Store=858dfe5d7eeba9b673a3436fe5ded0f9/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T15:26:18,449 INFO [RS_OPEN_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_OPEN_REGION, pid=125}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9. 2024-11-23T15:26:18,450 DEBUG [RS_OPEN_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_OPEN_REGION, pid=125}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9 2024-11-23T15:26:18,450 DEBUG [RS_OPEN_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_OPEN_REGION, pid=125}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9 2024-11-23T15:26:18,451 DEBUG [RS_OPEN_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_OPEN_REGION, pid=125}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-23T15:26:18,452 DEBUG [RS_OPEN_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_OPEN_REGION, pid=125}] regionserver.HRegion(1085): writing seq id for 858dfe5d7eeba9b673a3436fe5ded0f9 2024-11-23T15:26:18,454 DEBUG [RS_OPEN_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_OPEN_REGION, pid=125}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-23T15:26:18,454 INFO [RS_OPEN_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_OPEN_REGION, pid=125}] regionserver.HRegion(1102): Opened 858dfe5d7eeba9b673a3436fe5ded0f9; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=62652298, jitterRate=-0.06640800833702087}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-23T15:26:18,455 DEBUG [RS_OPEN_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_OPEN_REGION, pid=125}] regionserver.HRegion(1001): Region open journal for 858dfe5d7eeba9b673a3436fe5ded0f9: 2024-11-23T15:26:18,456 INFO [RS_OPEN_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_OPEN_REGION, pid=125}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9., pid=125, masterSystemTime=1732375578440 2024-11-23T15:26:18,457 DEBUG [RS_OPEN_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_OPEN_REGION, pid=125}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9. 2024-11-23T15:26:18,457 INFO [RS_OPEN_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_OPEN_REGION, pid=125}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9. 
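With the region open (pid=125), the test opens a batch of client connections (the ReadOnlyZKClient and AbstractRpcClient lines that follow) and starts issuing Mutate calls against families A, B and C. A minimal, hypothetical writer along those lines, with placeholder row key and value:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class WriterExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          // Write the same value to all three families in one Put, so a
          // concurrent scanner should never observe a row with mixed values --
          // the atomicity property this test exercises.
          Put put = new Put(Bytes.toBytes("test_row_0"));
          byte[] value = Bytes.toBytes("value-0");
          for (String family : new String[] {"A", "B", "C"}) {
            put.addColumn(Bytes.toBytes(family), Bytes.toBytes("col"), value);
          }
          table.put(put);
        }
      }
    }
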
2024-11-23T15:26:18,457 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=124 updating hbase:meta row=858dfe5d7eeba9b673a3436fe5ded0f9, regionState=OPEN, openSeqNum=2, regionLocation=6a36843bf905,33811,1732375456985 2024-11-23T15:26:18,459 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=125, resume processing ppid=124 2024-11-23T15:26:18,459 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=125, ppid=124, state=SUCCESS; OpenRegionProcedure 858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 in 169 msec 2024-11-23T15:26:18,460 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=124, resume processing ppid=123 2024-11-23T15:26:18,460 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=124, ppid=123, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=858dfe5d7eeba9b673a3436fe5ded0f9, ASSIGN in 323 msec 2024-11-23T15:26:18,460 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=123, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-23T15:26:18,460 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732375578460"}]},"ts":"1732375578460"} 2024-11-23T15:26:18,461 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-11-23T15:26:18,463 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=123, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-11-23T15:26:18,464 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=123, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.1480 sec 2024-11-23T15:26:19,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=123 2024-11-23T15:26:19,421 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 123 completed 2024-11-23T15:26:19,422 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x022a6e9f to 127.0.0.1:62881 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@4c60eb7d 2024-11-23T15:26:19,426 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@695c2253, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T15:26:19,427 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T15:26:19,428 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55498, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T15:26:19,429 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-23T15:26:19,430 INFO [RS-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56546, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-23T15:26:19,431 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x32c12a30 to 127.0.0.1:62881 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@79b10416 2024-11-23T15:26:19,434 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7177efc9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T15:26:19,435 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5ef40578 to 127.0.0.1:62881 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2f142b04 2024-11-23T15:26:19,437 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@61d38088, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T15:26:19,438 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x032bb71c to 127.0.0.1:62881 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@de9f076 2024-11-23T15:26:19,440 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7043f683, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T15:26:19,440 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x06bc0f7c to 127.0.0.1:62881 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@4414259d 2024-11-23T15:26:19,443 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2b0c2472, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T15:26:19,443 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1b8b6e04 to 127.0.0.1:62881 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7ed69825 2024-11-23T15:26:19,446 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@34b30c39, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T15:26:19,446 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x11193a0c to 127.0.0.1:62881 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3d672ed2 2024-11-23T15:26:19,449 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5f7c40ba, compressor=null, tcpKeepAlive=true, 
tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T15:26:19,450 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7861b162 to 127.0.0.1:62881 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7cf40102 2024-11-23T15:26:19,453 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@41b0e7b6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T15:26:19,453 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x154f0f85 to 127.0.0.1:62881 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@496fe03f 2024-11-23T15:26:19,456 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@f2423f3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T15:26:19,456 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x008a917b to 127.0.0.1:62881 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3652e74d 2024-11-23T15:26:19,459 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@184771cf, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T15:26:19,459 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x054c2725 to 127.0.0.1:62881 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2405c04e 2024-11-23T15:26:19,462 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@76f0408, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T15:26:19,468 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-23T15:26:19,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] procedure2.ProcedureExecutor(1098): Stored pid=126, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=126, table=TestAcidGuarantees 2024-11-23T15:26:19,469 DEBUG [hconnection-0x32fd34df-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T15:26:19,469 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=126, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=126, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-23T15:26:19,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] 
master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-11-23T15:26:19,470 DEBUG [hconnection-0x21291153-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T15:26:19,470 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=126, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=126, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-23T15:26:19,470 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=127, ppid=126, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-23T15:26:19,470 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55514, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T15:26:19,471 DEBUG [hconnection-0x4b946a48-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T15:26:19,471 DEBUG [hconnection-0x4d01dcea-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T15:26:19,471 DEBUG [hconnection-0x13c37e18-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T15:26:19,471 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55526, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T15:26:19,472 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55540, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T15:26:19,472 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55556, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T15:26:19,472 DEBUG [hconnection-0x2eed16a7-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T15:26:19,472 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55570, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T15:26:19,472 DEBUG [hconnection-0x3f9f6332-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T15:26:19,473 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55574, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T15:26:19,473 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55586, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T15:26:19,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(8581): Flush requested on 858dfe5d7eeba9b673a3436fe5ded0f9 2024-11-23T15:26:19,477 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 858dfe5d7eeba9b673a3436fe5ded0f9 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-23T15:26:19,477 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 858dfe5d7eeba9b673a3436fe5ded0f9, store=A 
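The flush requested above (pid=126/127) runs while the writers are still active; the RegionTooBusyException stack traces that follow show puts being rejected once pending memstore data exceeds the 512 KB blocking limit reported in the exception, until the flush drains it. As a rough sketch, the client-side call that drives the FlushTableProcedure is simply Admin.flush:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Ask the master to flush every region of the table. On the write
          // path, the standard HBase client typically retries
          // RegionTooBusyException rather than surfacing it immediately, so
          // writers resume once the flush frees memstore space.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }
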
2024-11-23T15:26:19,477 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:26:19,477 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 858dfe5d7eeba9b673a3436fe5ded0f9, store=B 2024-11-23T15:26:19,477 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:26:19,477 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 858dfe5d7eeba9b673a3436fe5ded0f9, store=C 2024-11-23T15:26:19,477 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:26:19,484 DEBUG [hconnection-0x48147094-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T15:26:19,485 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55590, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T15:26:19,488 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:19,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 3 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55590 deadline: 1732375639488, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:19,488 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:19,489 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:19,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55526 deadline: 1732375639488, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:19,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55540 deadline: 1732375639488, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:19,489 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:19,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55574 deadline: 1732375639489, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:19,491 DEBUG [hconnection-0x51c7d23a-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T15:26:19,491 DEBUG [hconnection-0xe51c249-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T15:26:19,492 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55602, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T15:26:19,492 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55612, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T15:26:19,495 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:19,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 2 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55602 deadline: 1732375639493, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:19,511 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/A/ff2c5e7784d94e57ac12e1d97bd83886 is 50, key is test_row_0/A:col10/1732375579476/Put/seqid=0 2024-11-23T15:26:19,514 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742218_1394 (size=9657) 2024-11-23T15:26:19,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-11-23T15:26:19,592 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:19,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 5 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55590 deadline: 1732375639589, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:19,593 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:19,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55540 deadline: 1732375639589, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:19,593 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:19,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55526 deadline: 1732375639589, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:19,593 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:19,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55574 deadline: 1732375639590, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:19,598 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:19,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 4 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55602 deadline: 1732375639596, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:19,622 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:26:19,623 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=127 2024-11-23T15:26:19,623 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9. 2024-11-23T15:26:19,623 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9. as already flushing 2024-11-23T15:26:19,623 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9. 2024-11-23T15:26:19,623 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] handler.RSProcedureHandler(58): pid=127 java.io.IOException: Unable to complete flush {ENCODED => 858dfe5d7eeba9b673a3436fe5ded0f9, NAME => 'TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:19,623 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=127 java.io.IOException: Unable to complete flush {ENCODED => 858dfe5d7eeba9b673a3436fe5ded0f9, NAME => 'TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:19,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=127 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 858dfe5d7eeba9b673a3436fe5ded0f9, NAME => 'TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 858dfe5d7eeba9b673a3436fe5ded0f9, NAME => 'TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:19,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-11-23T15:26:19,775 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:26:19,776 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=127 2024-11-23T15:26:19,776 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9. 2024-11-23T15:26:19,776 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9. as already flushing 2024-11-23T15:26:19,776 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9. 2024-11-23T15:26:19,776 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] handler.RSProcedureHandler(58): pid=127 java.io.IOException: Unable to complete flush {ENCODED => 858dfe5d7eeba9b673a3436fe5ded0f9, NAME => 'TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:19,776 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=127 java.io.IOException: Unable to complete flush {ENCODED => 858dfe5d7eeba9b673a3436fe5ded0f9, NAME => 'TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:19,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=127 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 858dfe5d7eeba9b673a3436fe5ded0f9, NAME => 'TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 858dfe5d7eeba9b673a3436fe5ded0f9, NAME => 'TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:19,794 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:19,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55590 deadline: 1732375639793, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:19,794 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:19,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55540 deadline: 1732375639794, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:19,795 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:19,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55574 deadline: 1732375639794, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:19,797 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:19,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55526 deadline: 1732375639795, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:19,801 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:19,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55602 deadline: 1732375639799, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:19,915 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/A/ff2c5e7784d94e57ac12e1d97bd83886 2024-11-23T15:26:19,928 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:26:19,929 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=127 2024-11-23T15:26:19,929 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9. 2024-11-23T15:26:19,929 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9. as already flushing 2024-11-23T15:26:19,929 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9. 
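The repeated RegionTooBusyException warnings above come from HRegion.checkResources(): once a region's memstore exceeds hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier, new mutations are rejected until the in-flight flush drains the memstore. The 512.0 K limit in this run is consistent with the deliberately small flush size used by the test (roughly 128 KB with the default multiplier of 4; this is an inference from the logged limit, not something stated in the log). The exception is retryable, and the stock client resubmits the Mutate calls with backoff, which is why the writer connections above keep going. A hand-rolled equivalent, with illustrative retry counts and pause values, might look like the following sketch:

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BusyRegionRetry {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Illustrative tuning only: more retries and a longer pause give the flush time to catch up.
        conf.setInt("hbase.client.retries.number", 10);
        conf.setLong("hbase.client.pause", 200);

        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_0"));
          put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));

          // Explicit retry loop for illustration; the standard client already retries
          // retryable server exceptions such as RegionTooBusyException internally.
          for (int attempt = 1; ; attempt++) {
            try {
              table.put(put);
              break;
            } catch (IOException e) {
              // A RegionTooBusyException from the server (as logged above) surfaces here,
              // possibly wrapped, once the client's own retries are exhausted.
              if (attempt >= 5) throw e;
              Thread.sleep(200L * attempt);   // back off while MemStoreFlusher drains the region
            }
          }
        }
      }
    }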
2024-11-23T15:26:19,929 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] handler.RSProcedureHandler(58): pid=127 java.io.IOException: Unable to complete flush {ENCODED => 858dfe5d7eeba9b673a3436fe5ded0f9, NAME => 'TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:19,929 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=127 java.io.IOException: Unable to complete flush {ENCODED => 858dfe5d7eeba9b673a3436fe5ded0f9, NAME => 'TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:19,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=127 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 858dfe5d7eeba9b673a3436fe5ded0f9, NAME => 'TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 858dfe5d7eeba9b673a3436fe5ded0f9, NAME => 'TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:19,939 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/B/64f1198f2dbd4d64a111e6976be76e71 is 50, key is test_row_0/B:col10/1732375579476/Put/seqid=0 2024-11-23T15:26:19,943 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742219_1395 (size=9657) 2024-11-23T15:26:19,944 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/B/64f1198f2dbd4d64a111e6976be76e71 2024-11-23T15:26:19,969 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/C/c69ea70c178f47a982a0147d5fdc266c is 50, key is test_row_0/C:col10/1732375579476/Put/seqid=0 2024-11-23T15:26:19,973 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742220_1396 (size=9657) 2024-11-23T15:26:19,977 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/C/c69ea70c178f47a982a0147d5fdc266c 2024-11-23T15:26:19,981 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/A/ff2c5e7784d94e57ac12e1d97bd83886 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/A/ff2c5e7784d94e57ac12e1d97bd83886 2024-11-23T15:26:19,984 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/A/ff2c5e7784d94e57ac12e1d97bd83886, entries=100, sequenceid=13, filesize=9.4 K 2024-11-23T15:26:19,985 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/B/64f1198f2dbd4d64a111e6976be76e71 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/B/64f1198f2dbd4d64a111e6976be76e71 2024-11-23T15:26:19,989 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/B/64f1198f2dbd4d64a111e6976be76e71, entries=100, sequenceid=13, filesize=9.4 K 2024-11-23T15:26:19,989 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/C/c69ea70c178f47a982a0147d5fdc266c as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/C/c69ea70c178f47a982a0147d5fdc266c 2024-11-23T15:26:19,993 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/C/c69ea70c178f47a982a0147d5fdc266c, entries=100, sequenceid=13, filesize=9.4 K 2024-11-23T15:26:19,994 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=147.60 KB/151140 for 858dfe5d7eeba9b673a3436fe5ded0f9 in 517ms, sequenceid=13, compaction requested=false 2024-11-23T15:26:19,994 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 858dfe5d7eeba9b673a3436fe5ded0f9: 2024-11-23T15:26:20,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-11-23T15:26:20,081 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:26:20,081 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=127 2024-11-23T15:26:20,082 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9. 
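At this point the memstore flush has completed: the three temporary HFiles under .tmp/ were committed into the A, B and C store directories (entries=100, sequenceid=13, about 9.4 K each), and the region reports ~60.38 KB flushed in 517 ms with no compaction requested, while the re-dispatched FlushRegionCallable for pid=127 starts its region operation. The committed files are immediately readable, and a single Get spanning the three families should return consistent values for a row, which is essentially the invariant TestAcidGuarantees exercises. Below is a minimal read sketch; the table, row and qualifier names are taken from the log, everything else is illustrative.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Get;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class ReadAcrossFamilies {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          // Single Get spanning the three families flushed above (A, B, C).
          Get get = new Get(Bytes.toBytes("test_row_0"));
          get.addFamily(Bytes.toBytes("A"));
          get.addFamily(Bytes.toBytes("B"));
          get.addFamily(Bytes.toBytes("C"));
          Result result = table.get(get);
          // The row is now served from the newly committed HFiles plus any live memstore data;
          // a consistent snapshot should show matching values across the families.
          System.out.println("A:col10 = " + Bytes.toString(result.getValue(Bytes.toBytes("A"), Bytes.toBytes("col10"))));
          System.out.println("B:col10 = " + Bytes.toString(result.getValue(Bytes.toBytes("B"), Bytes.toBytes("col10"))));
          System.out.println("C:col10 = " + Bytes.toString(result.getValue(Bytes.toBytes("C"), Bytes.toBytes("col10"))));
        }
      }
    }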
2024-11-23T15:26:20,082 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(2837): Flushing 858dfe5d7eeba9b673a3436fe5ded0f9 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-23T15:26:20,082 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 858dfe5d7eeba9b673a3436fe5ded0f9, store=A 2024-11-23T15:26:20,082 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:26:20,082 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 858dfe5d7eeba9b673a3436fe5ded0f9, store=B 2024-11-23T15:26:20,082 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:26:20,082 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 858dfe5d7eeba9b673a3436fe5ded0f9, store=C 2024-11-23T15:26:20,082 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:26:20,087 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/A/542944b52b0b40cf9bc4b170149f00d4 is 50, key is test_row_0/A:col10/1732375579487/Put/seqid=0 2024-11-23T15:26:20,091 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742221_1397 (size=12001) 2024-11-23T15:26:20,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(8581): Flush requested on 858dfe5d7eeba9b673a3436fe5ded0f9 2024-11-23T15:26:20,100 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9. as already flushing 2024-11-23T15:26:20,109 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:20,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55526 deadline: 1732375640105, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:20,109 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:20,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55574 deadline: 1732375640106, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:20,110 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:20,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55590 deadline: 1732375640106, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:20,110 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:20,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55602 deadline: 1732375640107, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:20,110 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:20,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55540 deadline: 1732375640107, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:20,213 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:20,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55526 deadline: 1732375640210, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:20,213 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:20,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55574 deadline: 1732375640210, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:20,213 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:20,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55590 deadline: 1732375640211, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:20,214 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:20,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55602 deadline: 1732375640211, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:20,214 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:20,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55540 deadline: 1732375640211, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:20,417 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:20,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55602 deadline: 1732375640415, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:20,417 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:20,417 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:20,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55526 deadline: 1732375640415, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:20,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55574 deadline: 1732375640415, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:20,418 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:20,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55590 deadline: 1732375640415, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:20,418 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:20,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55540 deadline: 1732375640416, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:20,492 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=38 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/A/542944b52b0b40cf9bc4b170149f00d4 2024-11-23T15:26:20,500 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/B/f4acc1b2c855428496a4719f2e813f24 is 50, key is test_row_0/B:col10/1732375579487/Put/seqid=0 2024-11-23T15:26:20,505 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742222_1398 (size=12001) 2024-11-23T15:26:20,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-11-23T15:26:20,720 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:20,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55602 deadline: 1732375640718, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:20,720 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:20,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55526 deadline: 1732375640719, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:20,722 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:20,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55590 deadline: 1732375640720, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:20,722 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:20,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55574 deadline: 1732375640720, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:20,723 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:20,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55540 deadline: 1732375640721, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:20,906 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=38 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/B/f4acc1b2c855428496a4719f2e813f24 2024-11-23T15:26:20,912 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/C/a4b0a5bafcae4501a6d4bb043a2e18d8 is 50, key is test_row_0/C:col10/1732375579487/Put/seqid=0 2024-11-23T15:26:20,916 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742223_1399 (size=12001) 2024-11-23T15:26:20,917 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=38 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/C/a4b0a5bafcae4501a6d4bb043a2e18d8 2024-11-23T15:26:20,920 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/A/542944b52b0b40cf9bc4b170149f00d4 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/A/542944b52b0b40cf9bc4b170149f00d4 2024-11-23T15:26:20,924 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/A/542944b52b0b40cf9bc4b170149f00d4, entries=150, sequenceid=38, filesize=11.7 K 2024-11-23T15:26:20,925 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/B/f4acc1b2c855428496a4719f2e813f24 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/B/f4acc1b2c855428496a4719f2e813f24 2024-11-23T15:26:20,929 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/B/f4acc1b2c855428496a4719f2e813f24, entries=150, sequenceid=38, filesize=11.7 K 2024-11-23T15:26:20,929 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/C/a4b0a5bafcae4501a6d4bb043a2e18d8 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/C/a4b0a5bafcae4501a6d4bb043a2e18d8 2024-11-23T15:26:20,933 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/C/a4b0a5bafcae4501a6d4bb043a2e18d8, entries=150, sequenceid=38, filesize=11.7 K 2024-11-23T15:26:20,934 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 858dfe5d7eeba9b673a3436fe5ded0f9 in 852ms, sequenceid=38, compaction requested=false 2024-11-23T15:26:20,934 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(2538): Flush status journal for 858dfe5d7eeba9b673a3436fe5ded0f9: 2024-11-23T15:26:20,934 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9. 
2024-11-23T15:26:20,934 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=127 2024-11-23T15:26:20,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4106): Remote procedure done, pid=127 2024-11-23T15:26:20,937 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=127, resume processing ppid=126 2024-11-23T15:26:20,937 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=127, ppid=126, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4650 sec 2024-11-23T15:26:20,938 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=126, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=126, table=TestAcidGuarantees in 1.4690 sec 2024-11-23T15:26:21,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(8581): Flush requested on 858dfe5d7eeba9b673a3436fe5ded0f9 2024-11-23T15:26:21,224 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 858dfe5d7eeba9b673a3436fe5ded0f9 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-23T15:26:21,225 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 858dfe5d7eeba9b673a3436fe5ded0f9, store=A 2024-11-23T15:26:21,225 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:26:21,225 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 858dfe5d7eeba9b673a3436fe5ded0f9, store=B 2024-11-23T15:26:21,225 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:26:21,225 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 858dfe5d7eeba9b673a3436fe5ded0f9, store=C 2024-11-23T15:26:21,225 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:26:21,229 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/A/12459cb25b49437c8997285d93f9e0bf is 50, key is test_row_0/A:col10/1732375580098/Put/seqid=0 2024-11-23T15:26:21,232 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742224_1400 (size=14341) 2024-11-23T15:26:21,250 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:21,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55590 deadline: 1732375641245, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:21,254 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:21,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55540 deadline: 1732375641249, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:21,254 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:21,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55574 deadline: 1732375641250, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:21,255 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:21,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55602 deadline: 1732375641250, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:21,255 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:21,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55526 deadline: 1732375641250, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:21,353 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-23T15:26:21,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55590 deadline: 1732375641351, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985
2024-11-23T15:26:21,358 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-23T15:26:21,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55540 deadline: 1732375641355, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985
2024-11-23T15:26:21,358 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-23T15:26:21,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55574 deadline: 1732375641355, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985
2024-11-23T15:26:21,359 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-23T15:26:21,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55602 deadline: 1732375641356, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985
2024-11-23T15:26:21,359 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-23T15:26:21,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55526 deadline: 1732375641356, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985
2024-11-23T15:26:21,552 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties
2024-11-23T15:26:21,557 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-23T15:26:21,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55590 deadline: 1732375641556, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985
2024-11-23T15:26:21,563 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-23T15:26:21,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55526 deadline: 1732375641560, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985
2024-11-23T15:26:21,563 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-23T15:26:21,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55540 deadline: 1732375641560, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985
2024-11-23T15:26:21,565 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-23T15:26:21,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55574 deadline: 1732375641564, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985
2024-11-23T15:26:21,565 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-23T15:26:21,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55602 deadline: 1732375641564, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985
2024-11-23T15:26:21,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126
2024-11-23T15:26:21,574 INFO [Thread-1789 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 126 completed
2024-11-23T15:26:21,575 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees
2024-11-23T15:26:21,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] procedure2.ProcedureExecutor(1098): Stored pid=128, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=128, table=TestAcidGuarantees
2024-11-23T15:26:21,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128
2024-11-23T15:26:21,576 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=128, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=128, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE
2024-11-23T15:26:21,577 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=128, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=128, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-11-23T15:26:21,577 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=129, ppid=128, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-11-23T15:26:21,633 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=50 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/A/12459cb25b49437c8997285d93f9e0bf
2024-11-23T15:26:21,640 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/B/1d5bf8887c8b446db8849f6ab45dc62b is 50, key is test_row_0/B:col10/1732375580098/Put/seqid=0
2024-11-23T15:26:21,644 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742225_1401 (size=12001)
2024-11-23T15:26:21,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128
2024-11-23T15:26:21,728 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985
2024-11-23T15:26:21,729 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=129
2024-11-23T15:26:21,729 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.
2024-11-23T15:26:21,729 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9. as already flushing
2024-11-23T15:26:21,729 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.
2024-11-23T15:26:21,729 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] handler.RSProcedureHandler(58): pid=129
java.io.IOException: Unable to complete flush {ENCODED => 858dfe5d7eeba9b673a3436fe5ded0f9, NAME => 'TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-23T15:26:21,729 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=129
java.io.IOException: Unable to complete flush {ENCODED => 858dfe5d7eeba9b673a3436fe5ded0f9, NAME => 'TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-23T15:26:21,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=129
org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 858dfe5d7eeba9b673a3436fe5ded0f9, NAME => 'TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?]
    at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?]
    at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
    at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => 858dfe5d7eeba9b673a3436fe5ded0f9, NAME => 'TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-23T15:26:21,861 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-23T15:26:21,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55590 deadline: 1732375641858, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985
2024-11-23T15:26:21,868 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-23T15:26:21,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55526 deadline: 1732375641865, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985
2024-11-23T15:26:21,869 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-23T15:26:21,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55540 deadline: 1732375641866, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985
2024-11-23T15:26:21,869 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-23T15:26:21,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55602 deadline: 1732375641866, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985
2024-11-23T15:26:21,870 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-23T15:26:21,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55574 deadline: 1732375641868, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985
2024-11-23T15:26:21,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128
2024-11-23T15:26:21,881 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985
2024-11-23T15:26:21,882 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=129
2024-11-23T15:26:21,882 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.
2024-11-23T15:26:21,882 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9. as already flushing
2024-11-23T15:26:21,882 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.
2024-11-23T15:26:21,882 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] handler.RSProcedureHandler(58): pid=129
java.io.IOException: Unable to complete flush {ENCODED => 858dfe5d7eeba9b673a3436fe5ded0f9, NAME => 'TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-23T15:26:21,882 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=129
java.io.IOException: Unable to complete flush {ENCODED => 858dfe5d7eeba9b673a3436fe5ded0f9, NAME => 'TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-23T15:26:21,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=129
org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 858dfe5d7eeba9b673a3436fe5ded0f9, NAME => 'TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?]
    at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?]
    at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
    at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => 858dfe5d7eeba9b673a3436fe5ded0f9, NAME => 'TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-23T15:26:22,034 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985
2024-11-23T15:26:22,035 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=129
2024-11-23T15:26:22,035 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.
2024-11-23T15:26:22,035 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9. as already flushing
2024-11-23T15:26:22,035 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.
2024-11-23T15:26:22,035 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] handler.RSProcedureHandler(58): pid=129
java.io.IOException: Unable to complete flush {ENCODED => 858dfe5d7eeba9b673a3436fe5ded0f9, NAME => 'TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-23T15:26:22,035 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=129
java.io.IOException: Unable to complete flush {ENCODED => 858dfe5d7eeba9b673a3436fe5ded0f9, NAME => 'TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-23T15:26:22,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=129
org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 858dfe5d7eeba9b673a3436fe5ded0f9, NAME => 'TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?]
    at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?]
    at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
    at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => 858dfe5d7eeba9b673a3436fe5ded0f9, NAME => 'TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-23T15:26:22,044 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=50 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/B/1d5bf8887c8b446db8849f6ab45dc62b
2024-11-23T15:26:22,051 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/C/c773eaba62f441c6982fe9144c18faa0 is 50, key is test_row_0/C:col10/1732375580098/Put/seqid=0
2024-11-23T15:26:22,055 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742226_1402 (size=12001)
2024-11-23T15:26:22,059 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=50 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/C/c773eaba62f441c6982fe9144c18faa0
2024-11-23T15:26:22,063 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/A/12459cb25b49437c8997285d93f9e0bf as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/A/12459cb25b49437c8997285d93f9e0bf
2024-11-23T15:26:22,067 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/A/12459cb25b49437c8997285d93f9e0bf, entries=200, sequenceid=50, filesize=14.0 K
2024-11-23T15:26:22,068 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/B/1d5bf8887c8b446db8849f6ab45dc62b as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/B/1d5bf8887c8b446db8849f6ab45dc62b
2024-11-23T15:26:22,071 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/B/1d5bf8887c8b446db8849f6ab45dc62b, entries=150, sequenceid=50, filesize=11.7 K
2024-11-23T15:26:22,072 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/C/c773eaba62f441c6982fe9144c18faa0 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/C/c773eaba62f441c6982fe9144c18faa0
2024-11-23T15:26:22,075 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/C/c773eaba62f441c6982fe9144c18faa0, entries=150, sequenceid=50, filesize=11.7 K
2024-11-23T15:26:22,076 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=147.60 KB/151140 for 858dfe5d7eeba9b673a3436fe5ded0f9 in 852ms, sequenceid=50, compaction requested=true
2024-11-23T15:26:22,076 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 858dfe5d7eeba9b673a3436fe5ded0f9:
2024-11-23T15:26:22,076 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 858dfe5d7eeba9b673a3436fe5ded0f9:A, priority=-2147483648, current under compaction store size is 1
2024-11-23T15:26:22,076 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-23T15:26:22,076 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 858dfe5d7eeba9b673a3436fe5ded0f9:B, priority=-2147483648, current under compaction store size is 2
2024-11-23T15:26:22,076 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-11-23T15:26:22,076 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0
2024-11-23T15:26:22,076 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 858dfe5d7eeba9b673a3436fe5ded0f9:C, priority=-2147483648, current under compaction store size is 3
2024-11-23T15:26:22,076 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-11-23T15:26:22,076 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0
2024-11-23T15:26:22,078 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 35999 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-11-23T15:26:22,078 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HStore(1540): 858dfe5d7eeba9b673a3436fe5ded0f9/A is initiating minor compaction (all files)
2024-11-23T15:26:22,078 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 33659 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-11-23T15:26:22,078 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 858dfe5d7eeba9b673a3436fe5ded0f9/A in TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.
2024-11-23T15:26:22,078 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1540): 858dfe5d7eeba9b673a3436fe5ded0f9/B is initiating minor compaction (all files)
2024-11-23T15:26:22,078 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 858dfe5d7eeba9b673a3436fe5ded0f9/B in TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.
2024-11-23T15:26:22,078 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/A/ff2c5e7784d94e57ac12e1d97bd83886, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/A/542944b52b0b40cf9bc4b170149f00d4, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/A/12459cb25b49437c8997285d93f9e0bf] into tmpdir=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp, totalSize=35.2 K
2024-11-23T15:26:22,078 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/B/64f1198f2dbd4d64a111e6976be76e71, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/B/f4acc1b2c855428496a4719f2e813f24, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/B/1d5bf8887c8b446db8849f6ab45dc62b] into tmpdir=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp, totalSize=32.9 K
2024-11-23T15:26:22,078 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting 64f1198f2dbd4d64a111e6976be76e71, keycount=100, bloomtype=ROW, size=9.4 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1732375579476
2024-11-23T15:26:22,079 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.Compactor(224): Compacting ff2c5e7784d94e57ac12e1d97bd83886, keycount=100, bloomtype=ROW, size=9.4 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1732375579476
2024-11-23T15:26:22,079 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting f4acc1b2c855428496a4719f2e813f24, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=38, earliestPutTs=1732375579487
2024-11-23T15:26:22,079 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 542944b52b0b40cf9bc4b170149f00d4, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=38, earliestPutTs=1732375579487
2024-11-23T15:26:22,079 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting 1d5bf8887c8b446db8849f6ab45dc62b, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=50, earliestPutTs=1732375580098
2024-11-23T15:26:22,079 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 12459cb25b49437c8997285d93f9e0bf, keycount=200, bloomtype=ROW, size=14.0 K, encoding=NONE, compression=NONE, seqNum=50, earliestPutTs=1732375580098
2024-11-23T15:26:22,090 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 858dfe5d7eeba9b673a3436fe5ded0f9#B#compaction#336 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second
2024-11-23T15:26:22,090 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 858dfe5d7eeba9b673a3436fe5ded0f9#A#compaction#335 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-11-23T15:26:22,090 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/B/dcf381a2d27949cfaad48bdb44ec46de is 50, key is test_row_0/B:col10/1732375580098/Put/seqid=0
2024-11-23T15:26:22,091 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/A/026dea753d9c40209b2b03bd48b7f17b is 50, key is test_row_0/A:col10/1732375580098/Put/seqid=0
2024-11-23T15:26:22,096 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742227_1403 (size=12104)
2024-11-23T15:26:22,100 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/B/dcf381a2d27949cfaad48bdb44ec46de as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/B/dcf381a2d27949cfaad48bdb44ec46de
2024-11-23T15:26:22,105 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 858dfe5d7eeba9b673a3436fe5ded0f9/B of 858dfe5d7eeba9b673a3436fe5ded0f9 into dcf381a2d27949cfaad48bdb44ec46de(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-11-23T15:26:22,105 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 858dfe5d7eeba9b673a3436fe5ded0f9: 2024-11-23T15:26:22,105 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9., storeName=858dfe5d7eeba9b673a3436fe5ded0f9/B, priority=13, startTime=1732375582076; duration=0sec 2024-11-23T15:26:22,105 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T15:26:22,105 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 858dfe5d7eeba9b673a3436fe5ded0f9:B 2024-11-23T15:26:22,105 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T15:26:22,107 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 33659 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T15:26:22,107 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1540): 858dfe5d7eeba9b673a3436fe5ded0f9/C is initiating minor compaction (all files) 2024-11-23T15:26:22,107 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 858dfe5d7eeba9b673a3436fe5ded0f9/C in TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9. 2024-11-23T15:26:22,107 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/C/c69ea70c178f47a982a0147d5fdc266c, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/C/a4b0a5bafcae4501a6d4bb043a2e18d8, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/C/c773eaba62f441c6982fe9144c18faa0] into tmpdir=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp, totalSize=32.9 K 2024-11-23T15:26:22,107 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742228_1404 (size=12104) 2024-11-23T15:26:22,108 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting c69ea70c178f47a982a0147d5fdc266c, keycount=100, bloomtype=ROW, size=9.4 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1732375579476 2024-11-23T15:26:22,108 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting a4b0a5bafcae4501a6d4bb043a2e18d8, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=38, earliestPutTs=1732375579487 2024-11-23T15:26:22,108 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting c773eaba62f441c6982fe9144c18faa0, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, 
seqNum=50, earliestPutTs=1732375580098 2024-11-23T15:26:22,117 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 858dfe5d7eeba9b673a3436fe5ded0f9#C#compaction#337 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T15:26:22,118 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/C/29f11b0d28fc49dbb513a6e5e63128a1 is 50, key is test_row_0/C:col10/1732375580098/Put/seqid=0 2024-11-23T15:26:22,123 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742229_1405 (size=12104) 2024-11-23T15:26:22,128 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/C/29f11b0d28fc49dbb513a6e5e63128a1 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/C/29f11b0d28fc49dbb513a6e5e63128a1 2024-11-23T15:26:22,133 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 858dfe5d7eeba9b673a3436fe5ded0f9/C of 858dfe5d7eeba9b673a3436fe5ded0f9 into 29f11b0d28fc49dbb513a6e5e63128a1(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T15:26:22,133 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 858dfe5d7eeba9b673a3436fe5ded0f9: 2024-11-23T15:26:22,133 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9., storeName=858dfe5d7eeba9b673a3436fe5ded0f9/C, priority=13, startTime=1732375582076; duration=0sec 2024-11-23T15:26:22,133 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T15:26:22,133 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 858dfe5d7eeba9b673a3436fe5ded0f9:C 2024-11-23T15:26:22,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-11-23T15:26:22,187 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:26:22,187 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=129 2024-11-23T15:26:22,187 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9. 
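[editor's note] The entries above show the longCompactions thread selecting three HFiles of store C for 858dfe5d7eeba9b673a3436fe5ded0f9 and rewriting them into a single ~11.8 K file. As a minimal illustrative sketch only (not part of TestAcidGuarantees; the cluster configuration is assumed to come from an hbase-site.xml on the classpath), the same kind of store compaction can be requested from a client through the Admin API:

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.client.Admin;
  import org.apache.hadoop.hbase.client.Connection;
  import org.apache.hadoop.hbase.client.ConnectionFactory;
  import org.apache.hadoop.hbase.util.Bytes;

  public class RequestCompaction {
      public static void main(String[] args) throws Exception {
          // Cluster coordinates are read from hbase-site.xml on the classpath (assumed).
          Configuration conf = HBaseConfiguration.create();
          try (Connection conn = ConnectionFactory.createConnection(conf);
               Admin admin = conn.getAdmin()) {
              TableName table = TableName.valueOf("TestAcidGuarantees");
              // Ask the region servers to compact column family C, the same store the
              // longCompactions thread compacts in the log entries above.
              admin.compact(table, Bytes.toBytes("C"));
              // The request is asynchronous; the compaction state can be polled afterwards.
              System.out.println("Compaction state: " + admin.getCompactionState(table));
          }
      }
  }

The selection itself (which files, how many, at what ratio) remains the server's decision, as the ExploringCompactionPolicy lines above show.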
2024-11-23T15:26:22,188 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(2837): Flushing 858dfe5d7eeba9b673a3436fe5ded0f9 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-23T15:26:22,188 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 858dfe5d7eeba9b673a3436fe5ded0f9, store=A 2024-11-23T15:26:22,188 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:26:22,188 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 858dfe5d7eeba9b673a3436fe5ded0f9, store=B 2024-11-23T15:26:22,188 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:26:22,188 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 858dfe5d7eeba9b673a3436fe5ded0f9, store=C 2024-11-23T15:26:22,188 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:26:22,193 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/A/cae2aa62960649fb9499fae5d63c839c is 50, key is test_row_0/A:col10/1732375581248/Put/seqid=0 2024-11-23T15:26:22,198 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742230_1406 (size=12001) 2024-11-23T15:26:22,199 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=77 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/A/cae2aa62960649fb9499fae5d63c839c 2024-11-23T15:26:22,206 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/B/bb8b301450524e78b08443b4754d7af7 is 50, key is test_row_0/B:col10/1732375581248/Put/seqid=0 2024-11-23T15:26:22,212 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742231_1407 (size=12001) 2024-11-23T15:26:22,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(8581): Flush requested on 858dfe5d7eeba9b673a3436fe5ded0f9 2024-11-23T15:26:22,369 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9. 
as already flushing 2024-11-23T15:26:22,378 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:22,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55540 deadline: 1732375642374, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:22,381 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:22,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55590 deadline: 1732375642377, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:22,381 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:22,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55574 deadline: 1732375642377, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:22,382 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:22,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55526 deadline: 1732375642378, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:22,382 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:22,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55602 deadline: 1732375642378, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:22,482 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:22,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55540 deadline: 1732375642479, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:22,485 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:22,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55590 deadline: 1732375642482, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:22,485 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:22,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55574 deadline: 1732375642482, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:22,485 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:22,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55526 deadline: 1732375642483, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:22,486 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:22,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55602 deadline: 1732375642483, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:22,512 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/A/026dea753d9c40209b2b03bd48b7f17b as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/A/026dea753d9c40209b2b03bd48b7f17b 2024-11-23T15:26:22,517 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 858dfe5d7eeba9b673a3436fe5ded0f9/A of 858dfe5d7eeba9b673a3436fe5ded0f9 into 026dea753d9c40209b2b03bd48b7f17b(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T15:26:22,517 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 858dfe5d7eeba9b673a3436fe5ded0f9: 2024-11-23T15:26:22,517 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9., storeName=858dfe5d7eeba9b673a3436fe5ded0f9/A, priority=13, startTime=1732375582076; duration=0sec 2024-11-23T15:26:22,517 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T15:26:22,517 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 858dfe5d7eeba9b673a3436fe5ded0f9:A 2024-11-23T15:26:22,621 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=77 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/B/bb8b301450524e78b08443b4754d7af7 2024-11-23T15:26:22,629 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/C/e4821e92534a43799a41acca3a77ca73 is 50, key is test_row_0/C:col10/1732375581248/Put/seqid=0 2024-11-23T15:26:22,635 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742232_1408 (size=12001) 2024-11-23T15:26:22,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-11-23T15:26:22,684 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:22,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55540 deadline: 1732375642683, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:22,688 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:22,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55590 deadline: 1732375642686, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:22,689 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:22,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55574 deadline: 1732375642686, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:22,689 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:22,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55526 deadline: 1732375642686, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:22,689 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:22,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55602 deadline: 1732375642687, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:22,989 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:22,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55540 deadline: 1732375642985, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:22,995 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:22,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55590 deadline: 1732375642991, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:22,995 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:22,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55574 deadline: 1732375642992, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:22,996 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:22,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55526 deadline: 1732375642992, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:22,996 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:22,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55602 deadline: 1732375642992, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:23,036 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=77 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/C/e4821e92534a43799a41acca3a77ca73 2024-11-23T15:26:23,041 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/A/cae2aa62960649fb9499fae5d63c839c as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/A/cae2aa62960649fb9499fae5d63c839c 2024-11-23T15:26:23,044 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/A/cae2aa62960649fb9499fae5d63c839c, entries=150, sequenceid=77, filesize=11.7 K 2024-11-23T15:26:23,045 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/B/bb8b301450524e78b08443b4754d7af7 as 
hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/B/bb8b301450524e78b08443b4754d7af7 2024-11-23T15:26:23,049 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/B/bb8b301450524e78b08443b4754d7af7, entries=150, sequenceid=77, filesize=11.7 K 2024-11-23T15:26:23,050 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/C/e4821e92534a43799a41acca3a77ca73 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/C/e4821e92534a43799a41acca3a77ca73 2024-11-23T15:26:23,053 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/C/e4821e92534a43799a41acca3a77ca73, entries=150, sequenceid=77, filesize=11.7 K 2024-11-23T15:26:23,054 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 858dfe5d7eeba9b673a3436fe5ded0f9 in 866ms, sequenceid=77, compaction requested=false 2024-11-23T15:26:23,054 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(2538): Flush status journal for 858dfe5d7eeba9b673a3436fe5ded0f9: 2024-11-23T15:26:23,054 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9. 
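[editor's note] At this point the flush has written ~49.20 KB per store at sequenceid=77 and the memstore pressure drops. The blocking limit reported in the exceptions above is normally the product of the configured memstore flush size and the block multiplier. The snippet below is a minimal sketch under assumed values (128 KB x 4 = 512 KB, chosen only to make the "Over memstore limit=512.0 K" arithmetic concrete; the actual values used by this test run are not shown in the log), followed by an Admin-triggered flush like the one pid=129 performs above. Note that these are server-side settings: setting them on a client Configuration does not change a running cluster.

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.client.Admin;
  import org.apache.hadoop.hbase.client.Connection;
  import org.apache.hadoop.hbase.client.ConnectionFactory;

  public class SmallMemstoreFlushDemo {
      public static void main(String[] args) throws Exception {
          Configuration conf = HBaseConfiguration.create();
          // Illustrative server-side settings: flush size 128 KB, block multiplier 4,
          // giving a 512 KB write-blocking limit (flush.size * block.multiplier).
          conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
          conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
          try (Connection conn = ConnectionFactory.createConnection(conf);
               Admin admin = conn.getAdmin()) {
              // Ask the region servers to flush the table's memstores to HFiles.
              admin.flush(TableName.valueOf("TestAcidGuarantees"));
          }
      }
  }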
2024-11-23T15:26:23,054 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=129 2024-11-23T15:26:23,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4106): Remote procedure done, pid=129 2024-11-23T15:26:23,056 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=129, resume processing ppid=128 2024-11-23T15:26:23,056 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=129, ppid=128, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4780 sec 2024-11-23T15:26:23,058 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=128, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=128, table=TestAcidGuarantees in 1.4820 sec 2024-11-23T15:26:23,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(8581): Flush requested on 858dfe5d7eeba9b673a3436fe5ded0f9 2024-11-23T15:26:23,495 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 858dfe5d7eeba9b673a3436fe5ded0f9 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-23T15:26:23,495 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 858dfe5d7eeba9b673a3436fe5ded0f9, store=A 2024-11-23T15:26:23,496 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:26:23,496 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 858dfe5d7eeba9b673a3436fe5ded0f9, store=B 2024-11-23T15:26:23,496 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:26:23,496 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 858dfe5d7eeba9b673a3436fe5ded0f9, store=C 2024-11-23T15:26:23,496 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:26:23,503 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/A/5e1cbcddc952453eb77647dc674051fe is 50, key is test_row_0/A:col10/1732375582371/Put/seqid=0 2024-11-23T15:26:23,521 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742233_1409 (size=14341) 2024-11-23T15:26:23,531 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:23,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55590 deadline: 1732375643525, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:23,531 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:23,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55574 deadline: 1732375643525, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:23,532 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:23,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55526 deadline: 1732375643530, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:23,535 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:23,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55540 deadline: 1732375643531, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:23,536 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:23,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55602 deadline: 1732375643531, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:23,633 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:23,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55590 deadline: 1732375643632, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:23,634 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:23,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55574 deadline: 1732375643633, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:23,637 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:23,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55526 deadline: 1732375643633, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:23,639 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:23,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55602 deadline: 1732375643637, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:23,639 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:23,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55540 deadline: 1732375643637, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:23,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-11-23T15:26:23,679 INFO [Thread-1789 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 128 completed 2024-11-23T15:26:23,681 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-23T15:26:23,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] procedure2.ProcedureExecutor(1098): Stored pid=130, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=130, table=TestAcidGuarantees 2024-11-23T15:26:23,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-11-23T15:26:23,682 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=130, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=130, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-23T15:26:23,683 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=130, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=130, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-23T15:26:23,683 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=131, ppid=130, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-23T15:26:23,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see 
if procedure is done pid=130 2024-11-23T15:26:23,834 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:26:23,835 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-11-23T15:26:23,835 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9. 2024-11-23T15:26:23,835 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9. as already flushing 2024-11-23T15:26:23,835 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9. 2024-11-23T15:26:23,835 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] handler.RSProcedureHandler(58): pid=131 java.io.IOException: Unable to complete flush {ENCODED => 858dfe5d7eeba9b673a3436fe5ded0f9, NAME => 'TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:23,835 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=131 java.io.IOException: Unable to complete flush {ENCODED => 858dfe5d7eeba9b673a3436fe5ded0f9, NAME => 'TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T15:26:23,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=131 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 858dfe5d7eeba9b673a3436fe5ded0f9, NAME => 'TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 858dfe5d7eeba9b673a3436fe5ded0f9, NAME => 'TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:23,837 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:23,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55590 deadline: 1732375643836, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:23,838 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:23,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55574 deadline: 1732375643836, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:23,842 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:23,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55526 deadline: 1732375643839, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:23,843 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:23,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55602 deadline: 1732375643840, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:23,843 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:23,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55540 deadline: 1732375643841, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:23,922 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=90 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/A/5e1cbcddc952453eb77647dc674051fe 2024-11-23T15:26:23,928 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/B/31ace1d3b5ea4aff9dfe211e2381241d is 50, key is test_row_0/B:col10/1732375582371/Put/seqid=0 2024-11-23T15:26:23,933 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742234_1410 (size=12001) 2024-11-23T15:26:23,933 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=90 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/B/31ace1d3b5ea4aff9dfe211e2381241d 2024-11-23T15:26:23,940 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/C/28445b216c984108a1930bf1aa8be421 is 50, key is test_row_0/C:col10/1732375582371/Put/seqid=0 2024-11-23T15:26:23,944 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742235_1411 (size=12001) 2024-11-23T15:26:23,983 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-11-23T15:26:23,987 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:26:23,987 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-11-23T15:26:23,988 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9. 2024-11-23T15:26:23,988 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9. as already flushing 2024-11-23T15:26:23,988 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9. 2024-11-23T15:26:23,988 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] handler.RSProcedureHandler(58): pid=131 java.io.IOException: Unable to complete flush {ENCODED => 858dfe5d7eeba9b673a3436fe5ded0f9, NAME => 'TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:23,988 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=131 java.io.IOException: Unable to complete flush {ENCODED => 858dfe5d7eeba9b673a3436fe5ded0f9, NAME => 'TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:23,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=131 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 858dfe5d7eeba9b673a3436fe5ded0f9, NAME => 'TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 858dfe5d7eeba9b673a3436fe5ded0f9, NAME => 'TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:24,140 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:26:24,140 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-11-23T15:26:24,140 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9. 
2024-11-23T15:26:24,140 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9. as already flushing 2024-11-23T15:26:24,140 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9. 2024-11-23T15:26:24,141 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] handler.RSProcedureHandler(58): pid=131 java.io.IOException: Unable to complete flush {ENCODED => 858dfe5d7eeba9b673a3436fe5ded0f9, NAME => 'TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:24,141 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=131 java.io.IOException: Unable to complete flush {ENCODED => 858dfe5d7eeba9b673a3436fe5ded0f9, NAME => 'TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T15:26:24,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=131 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 858dfe5d7eeba9b673a3436fe5ded0f9, NAME => 'TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 858dfe5d7eeba9b673a3436fe5ded0f9, NAME => 'TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:24,142 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:24,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55574 deadline: 1732375644141, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:24,143 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:24,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55590 deadline: 1732375644141, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:24,149 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:24,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55602 deadline: 1732375644144, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:24,149 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:24,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55526 deadline: 1732375644145, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:24,149 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:24,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55540 deadline: 1732375644146, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:24,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-11-23T15:26:24,293 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:26:24,293 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-11-23T15:26:24,293 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9. 2024-11-23T15:26:24,293 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9. as already flushing 2024-11-23T15:26:24,293 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9. 2024-11-23T15:26:24,293 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] handler.RSProcedureHandler(58): pid=131 java.io.IOException: Unable to complete flush {ENCODED => 858dfe5d7eeba9b673a3436fe5ded0f9, NAME => 'TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:24,294 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=131 java.io.IOException: Unable to complete flush {ENCODED => 858dfe5d7eeba9b673a3436fe5ded0f9, NAME => 'TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:24,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=131 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 858dfe5d7eeba9b673a3436fe5ded0f9, NAME => 'TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 858dfe5d7eeba9b673a3436fe5ded0f9, NAME => 'TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:24,344 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=90 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/C/28445b216c984108a1930bf1aa8be421 2024-11-23T15:26:24,348 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/A/5e1cbcddc952453eb77647dc674051fe as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/A/5e1cbcddc952453eb77647dc674051fe 2024-11-23T15:26:24,352 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/A/5e1cbcddc952453eb77647dc674051fe, entries=200, sequenceid=90, filesize=14.0 K 2024-11-23T15:26:24,353 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/B/31ace1d3b5ea4aff9dfe211e2381241d as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/B/31ace1d3b5ea4aff9dfe211e2381241d 2024-11-23T15:26:24,357 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/B/31ace1d3b5ea4aff9dfe211e2381241d, entries=150, sequenceid=90, 
filesize=11.7 K 2024-11-23T15:26:24,358 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/C/28445b216c984108a1930bf1aa8be421 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/C/28445b216c984108a1930bf1aa8be421 2024-11-23T15:26:24,362 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/C/28445b216c984108a1930bf1aa8be421, entries=150, sequenceid=90, filesize=11.7 K 2024-11-23T15:26:24,363 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 858dfe5d7eeba9b673a3436fe5ded0f9 in 868ms, sequenceid=90, compaction requested=true 2024-11-23T15:26:24,363 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 858dfe5d7eeba9b673a3436fe5ded0f9: 2024-11-23T15:26:24,363 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 858dfe5d7eeba9b673a3436fe5ded0f9:A, priority=-2147483648, current under compaction store size is 1 2024-11-23T15:26:24,363 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T15:26:24,363 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T15:26:24,363 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T15:26:24,363 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 858dfe5d7eeba9b673a3436fe5ded0f9:B, priority=-2147483648, current under compaction store size is 2 2024-11-23T15:26:24,363 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T15:26:24,363 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 858dfe5d7eeba9b673a3436fe5ded0f9:C, priority=-2147483648, current under compaction store size is 3 2024-11-23T15:26:24,363 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T15:26:24,364 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38446 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T15:26:24,364 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T15:26:24,364 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1540): 
858dfe5d7eeba9b673a3436fe5ded0f9/B is initiating minor compaction (all files) 2024-11-23T15:26:24,364 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HStore(1540): 858dfe5d7eeba9b673a3436fe5ded0f9/A is initiating minor compaction (all files) 2024-11-23T15:26:24,364 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 858dfe5d7eeba9b673a3436fe5ded0f9/A in TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9. 2024-11-23T15:26:24,364 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 858dfe5d7eeba9b673a3436fe5ded0f9/B in TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9. 2024-11-23T15:26:24,364 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/A/026dea753d9c40209b2b03bd48b7f17b, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/A/cae2aa62960649fb9499fae5d63c839c, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/A/5e1cbcddc952453eb77647dc674051fe] into tmpdir=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp, totalSize=37.5 K 2024-11-23T15:26:24,364 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/B/dcf381a2d27949cfaad48bdb44ec46de, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/B/bb8b301450524e78b08443b4754d7af7, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/B/31ace1d3b5ea4aff9dfe211e2381241d] into tmpdir=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp, totalSize=35.3 K 2024-11-23T15:26:24,364 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting dcf381a2d27949cfaad48bdb44ec46de, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=50, earliestPutTs=1732375580098 2024-11-23T15:26:24,364 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 026dea753d9c40209b2b03bd48b7f17b, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=50, earliestPutTs=1732375580098 2024-11-23T15:26:24,365 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.Compactor(224): Compacting cae2aa62960649fb9499fae5d63c839c, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=77, earliestPutTs=1732375581248 2024-11-23T15:26:24,365 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting bb8b301450524e78b08443b4754d7af7, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=77, earliestPutTs=1732375581248 2024-11-23T15:26:24,365 DEBUG 
[RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5e1cbcddc952453eb77647dc674051fe, keycount=200, bloomtype=ROW, size=14.0 K, encoding=NONE, compression=NONE, seqNum=90, earliestPutTs=1732375582371 2024-11-23T15:26:24,365 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting 31ace1d3b5ea4aff9dfe211e2381241d, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=90, earliestPutTs=1732375582371 2024-11-23T15:26:24,372 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 858dfe5d7eeba9b673a3436fe5ded0f9#A#compaction#344 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-23T15:26:24,372 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 858dfe5d7eeba9b673a3436fe5ded0f9#B#compaction#345 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T15:26:24,373 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/B/56fc6852722b4910a6fbf0f63074dc24 is 50, key is test_row_0/B:col10/1732375582371/Put/seqid=0 2024-11-23T15:26:24,373 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/A/add718e2257e42608430ff38041c5382 is 50, key is test_row_0/A:col10/1732375582371/Put/seqid=0 2024-11-23T15:26:24,378 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742236_1412 (size=12207) 2024-11-23T15:26:24,380 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742237_1413 (size=12207) 2024-11-23T15:26:24,445 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:26:24,446 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-11-23T15:26:24,446 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9. 
2024-11-23T15:26:24,446 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2837): Flushing 858dfe5d7eeba9b673a3436fe5ded0f9 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-23T15:26:24,447 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 858dfe5d7eeba9b673a3436fe5ded0f9, store=A 2024-11-23T15:26:24,447 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:26:24,447 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 858dfe5d7eeba9b673a3436fe5ded0f9, store=B 2024-11-23T15:26:24,447 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:26:24,447 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 858dfe5d7eeba9b673a3436fe5ded0f9, store=C 2024-11-23T15:26:24,447 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:26:24,453 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/A/867412d9e2cc4254bccc6fa1eec3924b is 50, key is test_row_0/A:col10/1732375583530/Put/seqid=0 2024-11-23T15:26:24,459 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742238_1414 (size=12001) 2024-11-23T15:26:24,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(8581): Flush requested on 858dfe5d7eeba9b673a3436fe5ded0f9 2024-11-23T15:26:24,654 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9. as already flushing 2024-11-23T15:26:24,662 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:24,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55526 deadline: 1732375644657, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:24,664 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:24,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55574 deadline: 1732375644660, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:24,665 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:24,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55590 deadline: 1732375644660, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:24,665 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:24,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55540 deadline: 1732375644661, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:24,667 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:24,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55602 deadline: 1732375644662, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:24,767 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:24,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55526 deadline: 1732375644763, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:24,768 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:24,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55574 deadline: 1732375644765, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:24,770 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:24,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55590 deadline: 1732375644766, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:24,770 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:24,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55540 deadline: 1732375644766, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:24,772 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:24,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55602 deadline: 1732375644768, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:24,784 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/A/add718e2257e42608430ff38041c5382 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/A/add718e2257e42608430ff38041c5382 2024-11-23T15:26:24,784 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/B/56fc6852722b4910a6fbf0f63074dc24 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/B/56fc6852722b4910a6fbf0f63074dc24 2024-11-23T15:26:24,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-11-23T15:26:24,788 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 858dfe5d7eeba9b673a3436fe5ded0f9/A of 858dfe5d7eeba9b673a3436fe5ded0f9 into add718e2257e42608430ff38041c5382(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T15:26:24,789 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 858dfe5d7eeba9b673a3436fe5ded0f9: 2024-11-23T15:26:24,789 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9., storeName=858dfe5d7eeba9b673a3436fe5ded0f9/A, priority=13, startTime=1732375584363; duration=0sec 2024-11-23T15:26:24,789 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T15:26:24,789 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 858dfe5d7eeba9b673a3436fe5ded0f9:A 2024-11-23T15:26:24,789 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T15:26:24,790 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 858dfe5d7eeba9b673a3436fe5ded0f9/B of 858dfe5d7eeba9b673a3436fe5ded0f9 into 56fc6852722b4910a6fbf0f63074dc24(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T15:26:24,790 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 858dfe5d7eeba9b673a3436fe5ded0f9: 2024-11-23T15:26:24,790 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9., storeName=858dfe5d7eeba9b673a3436fe5ded0f9/B, priority=13, startTime=1732375584363; duration=0sec 2024-11-23T15:26:24,790 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T15:26:24,790 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 858dfe5d7eeba9b673a3436fe5ded0f9:B 2024-11-23T15:26:24,790 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T15:26:24,790 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HStore(1540): 858dfe5d7eeba9b673a3436fe5ded0f9/C is initiating minor compaction (all files) 2024-11-23T15:26:24,790 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 858dfe5d7eeba9b673a3436fe5ded0f9/C in TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9. 
2024-11-23T15:26:24,790 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/C/29f11b0d28fc49dbb513a6e5e63128a1, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/C/e4821e92534a43799a41acca3a77ca73, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/C/28445b216c984108a1930bf1aa8be421] into tmpdir=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp, totalSize=35.3 K 2024-11-23T15:26:24,791 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 29f11b0d28fc49dbb513a6e5e63128a1, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=50, earliestPutTs=1732375580098 2024-11-23T15:26:24,791 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.Compactor(224): Compacting e4821e92534a43799a41acca3a77ca73, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=77, earliestPutTs=1732375581248 2024-11-23T15:26:24,791 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 28445b216c984108a1930bf1aa8be421, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=90, earliestPutTs=1732375582371 2024-11-23T15:26:24,798 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 858dfe5d7eeba9b673a3436fe5ded0f9#C#compaction#347 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T15:26:24,798 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/C/758f2179b2714d9fa9c1f5638b1d0694 is 50, key is test_row_0/C:col10/1732375582371/Put/seqid=0 2024-11-23T15:26:24,803 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742239_1415 (size=12207) 2024-11-23T15:26:24,859 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=114 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/A/867412d9e2cc4254bccc6fa1eec3924b 2024-11-23T15:26:24,866 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/B/d4304f3498d1423b86f72f531ba10732 is 50, key is test_row_0/B:col10/1732375583530/Put/seqid=0 2024-11-23T15:26:24,870 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742240_1416 (size=12001) 2024-11-23T15:26:24,972 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:24,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55526 deadline: 1732375644969, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:24,972 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:24,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55574 deadline: 1732375644970, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:24,973 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:24,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55540 deadline: 1732375644971, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:24,974 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:24,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55590 deadline: 1732375644971, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:24,976 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:24,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55602 deadline: 1732375644974, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:25,208 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/C/758f2179b2714d9fa9c1f5638b1d0694 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/C/758f2179b2714d9fa9c1f5638b1d0694 2024-11-23T15:26:25,212 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 858dfe5d7eeba9b673a3436fe5ded0f9/C of 858dfe5d7eeba9b673a3436fe5ded0f9 into 758f2179b2714d9fa9c1f5638b1d0694(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T15:26:25,212 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 858dfe5d7eeba9b673a3436fe5ded0f9: 2024-11-23T15:26:25,212 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9., storeName=858dfe5d7eeba9b673a3436fe5ded0f9/C, priority=13, startTime=1732375584363; duration=0sec 2024-11-23T15:26:25,212 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T15:26:25,212 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 858dfe5d7eeba9b673a3436fe5ded0f9:C 2024-11-23T15:26:25,270 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=114 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/B/d4304f3498d1423b86f72f531ba10732 2024-11-23T15:26:25,277 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:25,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55540 deadline: 1732375645274, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:25,278 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:25,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55574 deadline: 1732375645275, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:25,278 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/C/e0e9c4feae2046b987c5e591386af730 is 50, key is test_row_0/C:col10/1732375583530/Put/seqid=0 2024-11-23T15:26:25,278 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:25,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55526 deadline: 1732375645275, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:25,278 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:25,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55590 deadline: 1732375645275, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:25,282 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:25,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55602 deadline: 1732375645279, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:25,282 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742241_1417 (size=12001) 2024-11-23T15:26:25,683 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=114 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/C/e0e9c4feae2046b987c5e591386af730 2024-11-23T15:26:25,688 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/A/867412d9e2cc4254bccc6fa1eec3924b as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/A/867412d9e2cc4254bccc6fa1eec3924b 2024-11-23T15:26:25,692 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/A/867412d9e2cc4254bccc6fa1eec3924b, entries=150, sequenceid=114, filesize=11.7 K 2024-11-23T15:26:25,693 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/B/d4304f3498d1423b86f72f531ba10732 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/B/d4304f3498d1423b86f72f531ba10732 2024-11-23T15:26:25,696 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 
{event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/B/d4304f3498d1423b86f72f531ba10732, entries=150, sequenceid=114, filesize=11.7 K 2024-11-23T15:26:25,697 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/C/e0e9c4feae2046b987c5e591386af730 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/C/e0e9c4feae2046b987c5e591386af730 2024-11-23T15:26:25,701 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/C/e0e9c4feae2046b987c5e591386af730, entries=150, sequenceid=114, filesize=11.7 K 2024-11-23T15:26:25,701 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for 858dfe5d7eeba9b673a3436fe5ded0f9 in 1255ms, sequenceid=114, compaction requested=false 2024-11-23T15:26:25,702 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2538): Flush status journal for 858dfe5d7eeba9b673a3436fe5ded0f9: 2024-11-23T15:26:25,702 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9. 
2024-11-23T15:26:25,702 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=131 2024-11-23T15:26:25,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4106): Remote procedure done, pid=131 2024-11-23T15:26:25,704 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=131, resume processing ppid=130 2024-11-23T15:26:25,704 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=131, ppid=130, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.0200 sec 2024-11-23T15:26:25,705 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=130, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=130, table=TestAcidGuarantees in 2.0230 sec 2024-11-23T15:26:25,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(8581): Flush requested on 858dfe5d7eeba9b673a3436fe5ded0f9 2024-11-23T15:26:25,781 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 858dfe5d7eeba9b673a3436fe5ded0f9 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-23T15:26:25,781 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 858dfe5d7eeba9b673a3436fe5ded0f9, store=A 2024-11-23T15:26:25,781 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:26:25,781 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 858dfe5d7eeba9b673a3436fe5ded0f9, store=B 2024-11-23T15:26:25,781 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:26:25,781 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 858dfe5d7eeba9b673a3436fe5ded0f9, store=C 2024-11-23T15:26:25,781 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:26:25,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-11-23T15:26:25,785 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/A/a8a0ad8e14a14c1bbc2274819281f29a is 50, key is test_row_0/A:col10/1732375585779/Put/seqid=0 2024-11-23T15:26:25,786 INFO [Thread-1789 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 130 completed 2024-11-23T15:26:25,787 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-23T15:26:25,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] procedure2.ProcedureExecutor(1098): Stored pid=132, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=132, table=TestAcidGuarantees 2024-11-23T15:26:25,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-11-23T15:26:25,788 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=132, 
state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=132, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-23T15:26:25,789 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=132, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=132, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-23T15:26:25,789 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=133, ppid=132, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-23T15:26:25,789 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742242_1418 (size=14391) 2024-11-23T15:26:25,792 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=130 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/A/a8a0ad8e14a14c1bbc2274819281f29a 2024-11-23T15:26:25,800 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/B/dfbf3f8699174400847fac13fe98e565 is 50, key is test_row_0/B:col10/1732375585779/Put/seqid=0 2024-11-23T15:26:25,804 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742243_1419 (size=12051) 2024-11-23T15:26:25,811 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:25,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55590 deadline: 1732375645803, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:25,812 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:25,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55574 deadline: 1732375645805, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:25,812 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:25,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55602 deadline: 1732375645806, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:25,812 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:25,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55526 deadline: 1732375645807, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:25,816 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:25,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55540 deadline: 1732375645812, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:25,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-11-23T15:26:25,916 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:25,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55574 deadline: 1732375645913, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:25,916 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:25,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55590 deadline: 1732375645913, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:25,916 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:25,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55602 deadline: 1732375645913, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:25,916 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:25,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55526 deadline: 1732375645913, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:25,921 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:25,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55540 deadline: 1732375645917, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:25,941 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:26:25,941 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-11-23T15:26:25,941 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9. 2024-11-23T15:26:25,941 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9. as already flushing 2024-11-23T15:26:25,942 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9. 2024-11-23T15:26:25,942 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] handler.RSProcedureHandler(58): pid=133 java.io.IOException: Unable to complete flush {ENCODED => 858dfe5d7eeba9b673a3436fe5ded0f9, NAME => 'TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:25,942 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=133 java.io.IOException: Unable to complete flush {ENCODED => 858dfe5d7eeba9b673a3436fe5ded0f9, NAME => 'TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:25,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=133 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 858dfe5d7eeba9b673a3436fe5ded0f9, NAME => 'TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 858dfe5d7eeba9b673a3436fe5ded0f9, NAME => 'TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:26,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-11-23T15:26:26,093 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:26:26,094 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-11-23T15:26:26,094 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9. 2024-11-23T15:26:26,094 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9. as already flushing 2024-11-23T15:26:26,094 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9. 2024-11-23T15:26:26,094 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] handler.RSProcedureHandler(58): pid=133 java.io.IOException: Unable to complete flush {ENCODED => 858dfe5d7eeba9b673a3436fe5ded0f9, NAME => 'TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:26,094 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=133 java.io.IOException: Unable to complete flush {ENCODED => 858dfe5d7eeba9b673a3436fe5ded0f9, NAME => 'TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:26,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=133 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 858dfe5d7eeba9b673a3436fe5ded0f9, NAME => 'TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 858dfe5d7eeba9b673a3436fe5ded0f9, NAME => 'TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:26,120 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:26,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55574 deadline: 1732375646117, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:26,120 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:26,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55602 deadline: 1732375646117, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:26,122 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:26,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55590 deadline: 1732375646118, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:26,122 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:26,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55526 deadline: 1732375646118, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:26,126 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:26,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55540 deadline: 1732375646123, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:26,204 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=130 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/B/dfbf3f8699174400847fac13fe98e565 2024-11-23T15:26:26,212 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/C/316f575f1c254bde8de6f3ba2dc36b68 is 50, key is test_row_0/C:col10/1732375585779/Put/seqid=0 2024-11-23T15:26:26,215 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742244_1420 (size=12051) 2024-11-23T15:26:26,246 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:26:26,246 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-11-23T15:26:26,247 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9. 2024-11-23T15:26:26,247 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9. 
as already flushing 2024-11-23T15:26:26,247 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9. 2024-11-23T15:26:26,247 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] handler.RSProcedureHandler(58): pid=133 java.io.IOException: Unable to complete flush {ENCODED => 858dfe5d7eeba9b673a3436fe5ded0f9, NAME => 'TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:26,247 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=133 java.io.IOException: Unable to complete flush {ENCODED => 858dfe5d7eeba9b673a3436fe5ded0f9, NAME => 'TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:26,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=133 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 858dfe5d7eeba9b673a3436fe5ded0f9, NAME => 'TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 858dfe5d7eeba9b673a3436fe5ded0f9, NAME => 'TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:26,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-11-23T15:26:26,399 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:26:26,399 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-11-23T15:26:26,399 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9. 2024-11-23T15:26:26,399 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9. as already flushing 2024-11-23T15:26:26,399 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9. 2024-11-23T15:26:26,399 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] handler.RSProcedureHandler(58): pid=133 java.io.IOException: Unable to complete flush {ENCODED => 858dfe5d7eeba9b673a3436fe5ded0f9, NAME => 'TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:26,399 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=133 java.io.IOException: Unable to complete flush {ENCODED => 858dfe5d7eeba9b673a3436fe5ded0f9, NAME => 'TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:26,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=133 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 858dfe5d7eeba9b673a3436fe5ded0f9, NAME => 'TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 858dfe5d7eeba9b673a3436fe5ded0f9, NAME => 'TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:26,426 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:26,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55574 deadline: 1732375646423, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:26,426 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:26,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55602 deadline: 1732375646423, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:26,427 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:26,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55526 deadline: 1732375646424, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:26,427 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:26,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55590 deadline: 1732375646425, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:26,434 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:26,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55540 deadline: 1732375646428, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:26,551 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:26:26,552 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-11-23T15:26:26,552 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9. 2024-11-23T15:26:26,552 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9. as already flushing 2024-11-23T15:26:26,552 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9. 2024-11-23T15:26:26,552 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] handler.RSProcedureHandler(58): pid=133 java.io.IOException: Unable to complete flush {ENCODED => 858dfe5d7eeba9b673a3436fe5ded0f9, NAME => 'TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:26,552 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=133 java.io.IOException: Unable to complete flush {ENCODED => 858dfe5d7eeba9b673a3436fe5ded0f9, NAME => 'TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:26,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=133 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 858dfe5d7eeba9b673a3436fe5ded0f9, NAME => 'TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 858dfe5d7eeba9b673a3436fe5ded0f9, NAME => 'TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:26,616 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=130 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/C/316f575f1c254bde8de6f3ba2dc36b68 2024-11-23T15:26:26,620 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/A/a8a0ad8e14a14c1bbc2274819281f29a as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/A/a8a0ad8e14a14c1bbc2274819281f29a 2024-11-23T15:26:26,623 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/A/a8a0ad8e14a14c1bbc2274819281f29a, entries=200, sequenceid=130, filesize=14.1 K 2024-11-23T15:26:26,624 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/B/dfbf3f8699174400847fac13fe98e565 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/B/dfbf3f8699174400847fac13fe98e565 2024-11-23T15:26:26,627 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/B/dfbf3f8699174400847fac13fe98e565, entries=150, 
sequenceid=130, filesize=11.8 K 2024-11-23T15:26:26,628 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/C/316f575f1c254bde8de6f3ba2dc36b68 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/C/316f575f1c254bde8de6f3ba2dc36b68 2024-11-23T15:26:26,631 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/C/316f575f1c254bde8de6f3ba2dc36b68, entries=150, sequenceid=130, filesize=11.8 K 2024-11-23T15:26:26,632 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 858dfe5d7eeba9b673a3436fe5ded0f9 in 852ms, sequenceid=130, compaction requested=true 2024-11-23T15:26:26,632 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 858dfe5d7eeba9b673a3436fe5ded0f9: 2024-11-23T15:26:26,632 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 858dfe5d7eeba9b673a3436fe5ded0f9:A, priority=-2147483648, current under compaction store size is 1 2024-11-23T15:26:26,632 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T15:26:26,632 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T15:26:26,632 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 858dfe5d7eeba9b673a3436fe5ded0f9:B, priority=-2147483648, current under compaction store size is 2 2024-11-23T15:26:26,632 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T15:26:26,632 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 858dfe5d7eeba9b673a3436fe5ded0f9:C, priority=-2147483648, current under compaction store size is 3 2024-11-23T15:26:26,632 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T15:26:26,632 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T15:26:26,633 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36259 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T15:26:26,633 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38599 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T15:26:26,633 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1540): 
858dfe5d7eeba9b673a3436fe5ded0f9/B is initiating minor compaction (all files) 2024-11-23T15:26:26,633 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HStore(1540): 858dfe5d7eeba9b673a3436fe5ded0f9/A is initiating minor compaction (all files) 2024-11-23T15:26:26,633 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 858dfe5d7eeba9b673a3436fe5ded0f9/B in TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9. 2024-11-23T15:26:26,633 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 858dfe5d7eeba9b673a3436fe5ded0f9/A in TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9. 2024-11-23T15:26:26,633 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/B/56fc6852722b4910a6fbf0f63074dc24, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/B/d4304f3498d1423b86f72f531ba10732, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/B/dfbf3f8699174400847fac13fe98e565] into tmpdir=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp, totalSize=35.4 K 2024-11-23T15:26:26,634 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/A/add718e2257e42608430ff38041c5382, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/A/867412d9e2cc4254bccc6fa1eec3924b, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/A/a8a0ad8e14a14c1bbc2274819281f29a] into tmpdir=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp, totalSize=37.7 K 2024-11-23T15:26:26,634 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting 56fc6852722b4910a6fbf0f63074dc24, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=90, earliestPutTs=1732375582371 2024-11-23T15:26:26,634 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.Compactor(224): Compacting add718e2257e42608430ff38041c5382, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=90, earliestPutTs=1732375582371 2024-11-23T15:26:26,634 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting d4304f3498d1423b86f72f531ba10732, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=114, earliestPutTs=1732375583524 2024-11-23T15:26:26,634 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 867412d9e2cc4254bccc6fa1eec3924b, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=114, earliestPutTs=1732375583524 2024-11-23T15:26:26,635 DEBUG 
[RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting dfbf3f8699174400847fac13fe98e565, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=130, earliestPutTs=1732375584653 2024-11-23T15:26:26,635 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.Compactor(224): Compacting a8a0ad8e14a14c1bbc2274819281f29a, keycount=200, bloomtype=ROW, size=14.1 K, encoding=NONE, compression=NONE, seqNum=130, earliestPutTs=1732375584653 2024-11-23T15:26:26,642 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 858dfe5d7eeba9b673a3436fe5ded0f9#A#compaction#354 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-23T15:26:26,642 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 858dfe5d7eeba9b673a3436fe5ded0f9#B#compaction#353 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T15:26:26,642 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/A/96716b0b792149e9ba02169cc641d5b0 is 50, key is test_row_0/A:col10/1732375585779/Put/seqid=0 2024-11-23T15:26:26,643 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/B/72b4ac89ccac4ebbb1aad33e7f40633a is 50, key is test_row_0/B:col10/1732375585779/Put/seqid=0 2024-11-23T15:26:26,650 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742246_1422 (size=12359) 2024-11-23T15:26:26,660 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742245_1421 (size=12359) 2024-11-23T15:26:26,704 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:26:26,704 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-11-23T15:26:26,704 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9. 
2024-11-23T15:26:26,704 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2837): Flushing 858dfe5d7eeba9b673a3436fe5ded0f9 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-23T15:26:26,705 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 858dfe5d7eeba9b673a3436fe5ded0f9, store=A 2024-11-23T15:26:26,705 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:26:26,705 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 858dfe5d7eeba9b673a3436fe5ded0f9, store=B 2024-11-23T15:26:26,705 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:26:26,705 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 858dfe5d7eeba9b673a3436fe5ded0f9, store=C 2024-11-23T15:26:26,705 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:26:26,708 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/A/baf4a0b4e85d499cb5696ada0bde39bd is 50, key is test_row_0/A:col10/1732375585806/Put/seqid=0 2024-11-23T15:26:26,712 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742247_1423 (size=12151) 2024-11-23T15:26:26,751 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees 2024-11-23T15:26:26,751 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees Metrics about Tables on a single HBase RegionServer 2024-11-23T15:26:26,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-11-23T15:26:26,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(8581): Flush requested on 858dfe5d7eeba9b673a3436fe5ded0f9 2024-11-23T15:26:26,933 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9. as already flushing 2024-11-23T15:26:26,944 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:26,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55526 deadline: 1732375646940, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:26,945 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:26,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55590 deadline: 1732375646940, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:26,947 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:26,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55602 deadline: 1732375646941, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:26,948 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:26,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55540 deadline: 1732375646942, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:26,948 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:26,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55574 deadline: 1732375646943, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:27,048 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:27,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55526 deadline: 1732375647046, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:27,049 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:27,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55590 deadline: 1732375647046, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:27,052 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:27,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55602 deadline: 1732375647048, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:27,053 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:27,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55540 deadline: 1732375647049, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:27,053 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:27,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55574 deadline: 1732375647049, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:27,055 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/B/72b4ac89ccac4ebbb1aad33e7f40633a as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/B/72b4ac89ccac4ebbb1aad33e7f40633a 2024-11-23T15:26:27,060 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 858dfe5d7eeba9b673a3436fe5ded0f9/B of 858dfe5d7eeba9b673a3436fe5ded0f9 into 72b4ac89ccac4ebbb1aad33e7f40633a(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
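The repeated RegionTooBusyException entries above show Mutate RPCs against region 858dfe5d7eeba9b673a3436fe5ded0f9 being rejected while the region's memstore is over its blocking limit (512.0 K in this log). The HBase client normally retries this exception on its own; the sketch below is only an illustrative, hypothetical application-level retry around a single Put to the TestAcidGuarantees table and row/family names taken from the log. The retry count, back-off values, and client settings are assumptions, not the test's actual configuration.

```java
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class TooBusyRetryExample {
    public static void main(String[] args) throws IOException, InterruptedException {
        Configuration conf = HBaseConfiguration.create();
        // Hypothetical client-side retry tuning; the default client already retries
        // RegionTooBusyException internally before surfacing a failure.
        conf.setInt("hbase.client.retries.number", 10);
        conf.setLong("hbase.client.pause", 100);

        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            // Row key and column names mirror what the log shows (test_row_0, family A, col10).
            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));

            int attempts = 0;
            while (true) {
                try {
                    table.put(put);
                    break;
                } catch (RegionTooBusyException e) {
                    // The region's memstore is above the blocking limit; back off and let
                    // the in-flight flushes and compactions seen in the log catch up.
                    if (++attempts >= 5) {
                        throw e;
                    }
                    Thread.sleep(200L * attempts);
                }
            }
        }
    }
}
```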
2024-11-23T15:26:27,060 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 858dfe5d7eeba9b673a3436fe5ded0f9: 2024-11-23T15:26:27,060 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9., storeName=858dfe5d7eeba9b673a3436fe5ded0f9/B, priority=13, startTime=1732375586632; duration=0sec 2024-11-23T15:26:27,061 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T15:26:27,061 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 858dfe5d7eeba9b673a3436fe5ded0f9:B 2024-11-23T15:26:27,061 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T15:26:27,062 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36259 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T15:26:27,062 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1540): 858dfe5d7eeba9b673a3436fe5ded0f9/C is initiating minor compaction (all files) 2024-11-23T15:26:27,062 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 858dfe5d7eeba9b673a3436fe5ded0f9/C in TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9. 2024-11-23T15:26:27,063 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/A/96716b0b792149e9ba02169cc641d5b0 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/A/96716b0b792149e9ba02169cc641d5b0 2024-11-23T15:26:27,063 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/C/758f2179b2714d9fa9c1f5638b1d0694, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/C/e0e9c4feae2046b987c5e591386af730, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/C/316f575f1c254bde8de6f3ba2dc36b68] into tmpdir=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp, totalSize=35.4 K 2024-11-23T15:26:27,063 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting 758f2179b2714d9fa9c1f5638b1d0694, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=90, earliestPutTs=1732375582371 2024-11-23T15:26:27,064 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting e0e9c4feae2046b987c5e591386af730, keycount=150, 
bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=114, earliestPutTs=1732375583524 2024-11-23T15:26:27,064 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting 316f575f1c254bde8de6f3ba2dc36b68, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=130, earliestPutTs=1732375584653 2024-11-23T15:26:27,067 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 858dfe5d7eeba9b673a3436fe5ded0f9/A of 858dfe5d7eeba9b673a3436fe5ded0f9 into 96716b0b792149e9ba02169cc641d5b0(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T15:26:27,067 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 858dfe5d7eeba9b673a3436fe5ded0f9: 2024-11-23T15:26:27,067 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9., storeName=858dfe5d7eeba9b673a3436fe5ded0f9/A, priority=13, startTime=1732375586632; duration=0sec 2024-11-23T15:26:27,067 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T15:26:27,067 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 858dfe5d7eeba9b673a3436fe5ded0f9:A 2024-11-23T15:26:27,072 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 858dfe5d7eeba9b673a3436fe5ded0f9#C#compaction#356 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T15:26:27,072 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/C/47a5a32e7e27464fb0d2b94c97cee65f is 50, key is test_row_0/C:col10/1732375585779/Put/seqid=0 2024-11-23T15:26:27,084 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742248_1424 (size=12359) 2024-11-23T15:26:27,113 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=153 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/A/baf4a0b4e85d499cb5696ada0bde39bd 2024-11-23T15:26:27,120 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/B/0b507f64d9ba414ebd2b919b21523c83 is 50, key is test_row_0/B:col10/1732375585806/Put/seqid=0 2024-11-23T15:26:27,124 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742249_1425 (size=12151) 2024-11-23T15:26:27,252 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:27,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55526 deadline: 1732375647250, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:27,253 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:27,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55590 deadline: 1732375647251, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:27,259 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:27,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55602 deadline: 1732375647253, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:27,259 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:27,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55574 deadline: 1732375647254, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:27,260 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:27,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55540 deadline: 1732375647255, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:27,489 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/C/47a5a32e7e27464fb0d2b94c97cee65f as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/C/47a5a32e7e27464fb0d2b94c97cee65f 2024-11-23T15:26:27,493 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 858dfe5d7eeba9b673a3436fe5ded0f9/C of 858dfe5d7eeba9b673a3436fe5ded0f9 into 47a5a32e7e27464fb0d2b94c97cee65f(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
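The "Over memstore limit=512.0 K" figure in these warnings is the per-region blocking threshold, which HBase derives from hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier. The snippet below is a minimal sketch of that relationship, assuming illustrative values (128 KB x 4) that would produce the 512 K limit seen here; the test's actual settings are not shown in this log.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreBlockingConfig {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();

        // Illustrative values only (assumed, not read from the test): a 128 KB flush size
        // with the default multiplier of 4 yields the 512 K blocking limit in the log.
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);

        // Writes to a region are rejected with RegionTooBusyException once its memstore
        // grows past flushSize * blockMultiplier, until flushes bring it back down.
        long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 134217728)
                * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
        System.out.println("Writes block when a region's memstore exceeds " + blockingLimit + " bytes");
    }
}
```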
2024-11-23T15:26:27,494 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 858dfe5d7eeba9b673a3436fe5ded0f9: 2024-11-23T15:26:27,494 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9., storeName=858dfe5d7eeba9b673a3436fe5ded0f9/C, priority=13, startTime=1732375586632; duration=0sec 2024-11-23T15:26:27,494 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T15:26:27,494 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 858dfe5d7eeba9b673a3436fe5ded0f9:C 2024-11-23T15:26:27,524 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=153 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/B/0b507f64d9ba414ebd2b919b21523c83 2024-11-23T15:26:27,532 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/C/e0607311fcc54253ad1badf3f9da078e is 50, key is test_row_0/C:col10/1732375585806/Put/seqid=0 2024-11-23T15:26:27,536 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742250_1426 (size=12151) 2024-11-23T15:26:27,556 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:27,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55526 deadline: 1732375647554, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:27,556 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:27,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55590 deadline: 1732375647555, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:27,562 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:27,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55602 deadline: 1732375647560, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:27,564 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:27,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55574 deadline: 1732375647563, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:27,564 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:27,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55540 deadline: 1732375647563, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:27,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-11-23T15:26:27,936 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=153 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/C/e0607311fcc54253ad1badf3f9da078e 2024-11-23T15:26:27,940 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/A/baf4a0b4e85d499cb5696ada0bde39bd as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/A/baf4a0b4e85d499cb5696ada0bde39bd 2024-11-23T15:26:27,943 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/A/baf4a0b4e85d499cb5696ada0bde39bd, entries=150, sequenceid=153, filesize=11.9 K 2024-11-23T15:26:27,944 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/B/0b507f64d9ba414ebd2b919b21523c83 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/B/0b507f64d9ba414ebd2b919b21523c83 2024-11-23T15:26:27,947 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/B/0b507f64d9ba414ebd2b919b21523c83, entries=150, sequenceid=153, filesize=11.9 K 2024-11-23T15:26:27,948 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/C/e0607311fcc54253ad1badf3f9da078e as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/C/e0607311fcc54253ad1badf3f9da078e 2024-11-23T15:26:27,952 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/C/e0607311fcc54253ad1badf3f9da078e, entries=150, sequenceid=153, filesize=11.9 K 2024-11-23T15:26:27,952 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for 858dfe5d7eeba9b673a3436fe5ded0f9 in 1248ms, sequenceid=153, compaction requested=false 2024-11-23T15:26:27,952 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2538): Flush status journal for 858dfe5d7eeba9b673a3436fe5ded0f9: 2024-11-23T15:26:27,952 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9. 
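The flush recorded above (150 entries at sequenceid=153 per store, ~11.9 K each) runs under a master procedure: FlushRegionProcedure pid=133 as a child of FlushTableProcedure pid=132, both of which finish a few entries below. A minimal sketch of requesting such a table flush from client code is shown here, assuming the Admin API is the entry point; the test harness may trigger it through its own utilities instead.

```java
import java.io.IOException;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
    public static void main(String[] args) throws IOException {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
            // Asks the master to flush every region of the table; the regionserver then
            // writes each memstore (stores A/B/C in this log) out as new HFiles.
            admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}
```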
2024-11-23T15:26:27,952 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=133 2024-11-23T15:26:27,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4106): Remote procedure done, pid=133 2024-11-23T15:26:27,955 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=133, resume processing ppid=132 2024-11-23T15:26:27,955 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=133, ppid=132, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.1640 sec 2024-11-23T15:26:27,956 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=132, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=132, table=TestAcidGuarantees in 2.1680 sec 2024-11-23T15:26:28,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(8581): Flush requested on 858dfe5d7eeba9b673a3436fe5ded0f9 2024-11-23T15:26:28,060 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 858dfe5d7eeba9b673a3436fe5ded0f9 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-11-23T15:26:28,061 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 858dfe5d7eeba9b673a3436fe5ded0f9, store=A 2024-11-23T15:26:28,061 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:26:28,061 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 858dfe5d7eeba9b673a3436fe5ded0f9, store=B 2024-11-23T15:26:28,062 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:26:28,062 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 858dfe5d7eeba9b673a3436fe5ded0f9, store=C 2024-11-23T15:26:28,062 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:26:28,065 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/A/5e52b12c27ab471aa38aa6274e1fefa2 is 50, key is test_row_0/A:col10/1732375586939/Put/seqid=0 2024-11-23T15:26:28,070 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742251_1427 (size=14541) 2024-11-23T15:26:28,090 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:28,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55526 deadline: 1732375648082, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:28,090 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:28,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55602 deadline: 1732375648083, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:28,092 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:28,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55574 deadline: 1732375648084, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:28,093 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:28,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55540 deadline: 1732375648088, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:28,093 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:28,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55590 deadline: 1732375648089, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:28,193 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:28,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55526 deadline: 1732375648191, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:28,194 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:28,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55602 deadline: 1732375648191, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:28,196 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:28,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55574 deadline: 1732375648193, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:28,198 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:28,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55540 deadline: 1732375648194, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:28,198 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:28,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55590 deadline: 1732375648194, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:28,396 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:28,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55526 deadline: 1732375648396, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:28,400 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:28,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55602 deadline: 1732375648396, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:28,401 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:28,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55574 deadline: 1732375648398, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:28,402 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:28,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55590 deadline: 1732375648399, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:28,402 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:28,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55540 deadline: 1732375648399, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:28,471 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=171 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/A/5e52b12c27ab471aa38aa6274e1fefa2 2024-11-23T15:26:28,477 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/B/1c1b4b6b840a4465b1787e116cd9506e is 50, key is test_row_0/B:col10/1732375586939/Put/seqid=0 2024-11-23T15:26:28,480 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742252_1428 (size=12151) 2024-11-23T15:26:28,703 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:28,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55526 deadline: 1732375648700, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:28,704 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:28,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55602 deadline: 1732375648702, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:28,704 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:28,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55574 deadline: 1732375648702, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:28,707 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:28,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55590 deadline: 1732375648704, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:28,708 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:28,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55540 deadline: 1732375648704, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:28,881 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=171 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/B/1c1b4b6b840a4465b1787e116cd9506e 2024-11-23T15:26:28,888 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/C/3ae4b6f9760843c9842be6633db5a85c is 50, key is test_row_0/C:col10/1732375586939/Put/seqid=0 2024-11-23T15:26:28,893 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742253_1429 (size=12151) 2024-11-23T15:26:28,894 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=171 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/C/3ae4b6f9760843c9842be6633db5a85c 2024-11-23T15:26:28,897 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/A/5e52b12c27ab471aa38aa6274e1fefa2 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/A/5e52b12c27ab471aa38aa6274e1fefa2 2024-11-23T15:26:28,901 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/A/5e52b12c27ab471aa38aa6274e1fefa2, entries=200, sequenceid=171, filesize=14.2 K 2024-11-23T15:26:28,901 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/B/1c1b4b6b840a4465b1787e116cd9506e as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/B/1c1b4b6b840a4465b1787e116cd9506e 2024-11-23T15:26:28,904 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/B/1c1b4b6b840a4465b1787e116cd9506e, entries=150, sequenceid=171, filesize=11.9 K 2024-11-23T15:26:28,905 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/C/3ae4b6f9760843c9842be6633db5a85c as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/C/3ae4b6f9760843c9842be6633db5a85c 2024-11-23T15:26:28,908 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/C/3ae4b6f9760843c9842be6633db5a85c, entries=150, sequenceid=171, filesize=11.9 K 2024-11-23T15:26:28,908 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for 858dfe5d7eeba9b673a3436fe5ded0f9 in 848ms, sequenceid=171, compaction requested=true 2024-11-23T15:26:28,908 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 858dfe5d7eeba9b673a3436fe5ded0f9: 2024-11-23T15:26:28,909 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 858dfe5d7eeba9b673a3436fe5ded0f9:A, priority=-2147483648, current under compaction store size is 1 2024-11-23T15:26:28,909 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T15:26:28,909 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T15:26:28,909 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 858dfe5d7eeba9b673a3436fe5ded0f9:B, priority=-2147483648, current under compaction store size is 2 2024-11-23T15:26:28,909 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T15:26:28,909 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 858dfe5d7eeba9b673a3436fe5ded0f9:C, priority=-2147483648, current under compaction store size is 3 2024-11-23T15:26:28,909 DEBUG 
[RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T15:26:28,909 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T15:26:28,909 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 39051 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T15:26:28,909 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36661 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T15:26:28,910 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1540): 858dfe5d7eeba9b673a3436fe5ded0f9/B is initiating minor compaction (all files) 2024-11-23T15:26:28,910 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HStore(1540): 858dfe5d7eeba9b673a3436fe5ded0f9/A is initiating minor compaction (all files) 2024-11-23T15:26:28,910 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 858dfe5d7eeba9b673a3436fe5ded0f9/B in TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9. 2024-11-23T15:26:28,910 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 858dfe5d7eeba9b673a3436fe5ded0f9/A in TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9. 2024-11-23T15:26:28,910 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/A/96716b0b792149e9ba02169cc641d5b0, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/A/baf4a0b4e85d499cb5696ada0bde39bd, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/A/5e52b12c27ab471aa38aa6274e1fefa2] into tmpdir=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp, totalSize=38.1 K 2024-11-23T15:26:28,910 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/B/72b4ac89ccac4ebbb1aad33e7f40633a, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/B/0b507f64d9ba414ebd2b919b21523c83, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/B/1c1b4b6b840a4465b1787e116cd9506e] into tmpdir=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp, totalSize=35.8 K 2024-11-23T15:26:28,910 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] 
compactions.Compactor(224): Compacting 72b4ac89ccac4ebbb1aad33e7f40633a, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=130, earliestPutTs=1732375584653 2024-11-23T15:26:28,910 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 96716b0b792149e9ba02169cc641d5b0, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=130, earliestPutTs=1732375584653 2024-11-23T15:26:28,910 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting 0b507f64d9ba414ebd2b919b21523c83, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=153, earliestPutTs=1732375585798 2024-11-23T15:26:28,910 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.Compactor(224): Compacting baf4a0b4e85d499cb5696ada0bde39bd, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=153, earliestPutTs=1732375585798 2024-11-23T15:26:28,911 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting 1c1b4b6b840a4465b1787e116cd9506e, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=171, earliestPutTs=1732375586939 2024-11-23T15:26:28,911 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5e52b12c27ab471aa38aa6274e1fefa2, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=171, earliestPutTs=1732375586939 2024-11-23T15:26:28,917 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 858dfe5d7eeba9b673a3436fe5ded0f9#A#compaction#363 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-23T15:26:28,917 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 858dfe5d7eeba9b673a3436fe5ded0f9#B#compaction#362 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T15:26:28,917 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/A/247553ed3dc14159b3dea6b91f5a21c6 is 50, key is test_row_0/A:col10/1732375586939/Put/seqid=0 2024-11-23T15:26:28,918 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/B/922eaca726f04d9b84ebf6951d20449a is 50, key is test_row_0/B:col10/1732375586939/Put/seqid=0 2024-11-23T15:26:28,921 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742254_1430 (size=12561) 2024-11-23T15:26:28,927 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/A/247553ed3dc14159b3dea6b91f5a21c6 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/A/247553ed3dc14159b3dea6b91f5a21c6 2024-11-23T15:26:28,932 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 858dfe5d7eeba9b673a3436fe5ded0f9/A of 858dfe5d7eeba9b673a3436fe5ded0f9 into 247553ed3dc14159b3dea6b91f5a21c6(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
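
The "Exploring compaction algorithm has selected 3 files" lines above reflect a size-ratio rule: a group of store files is only compacted together if no single file dwarfs the sum of the others. Below is a minimal, self-contained sketch of that ratio check in isolation, assuming illustrative file sizes close to the ~12 K flush outputs seen here and a hypothetical 1.2 ratio; it is not the actual ExploringCompactionPolicy code.

    import java.util.List;

    // Ratio check in the spirit of the compaction selection above: every
    // candidate file must be no larger than (sum of the other candidates) * ratio.
    // Sizes and the ratio below are illustrative assumptions, not values
    // parsed from this log.
    public class RatioSelectionSketch {
        static boolean withinRatio(List<Long> fileSizes, double ratio) {
            long total = fileSizes.stream().mapToLong(Long::longValue).sum();
            for (long size : fileSizes) {
                if (size > (total - size) * ratio) {
                    return false; // one file dominates, skip this combination
                }
            }
            return true;
        }

        public static void main(String[] args) {
            // Three similarly sized store files, roughly like the flush outputs above.
            List<Long> candidates = List.of(12_400L, 12_200L, 14_500L);
            System.out.println(withinRatio(candidates, 1.2)); // prints true -> compact all three
        }
    }

With three files of comparable size the check passes, which matches the decision in the log to take all three A, B and C files into one minor compaction per store.
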
2024-11-23T15:26:28,932 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 858dfe5d7eeba9b673a3436fe5ded0f9: 2024-11-23T15:26:28,932 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9., storeName=858dfe5d7eeba9b673a3436fe5ded0f9/A, priority=13, startTime=1732375588909; duration=0sec 2024-11-23T15:26:28,932 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T15:26:28,932 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 858dfe5d7eeba9b673a3436fe5ded0f9:A 2024-11-23T15:26:28,932 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T15:26:28,933 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36661 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T15:26:28,933 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HStore(1540): 858dfe5d7eeba9b673a3436fe5ded0f9/C is initiating minor compaction (all files) 2024-11-23T15:26:28,933 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 858dfe5d7eeba9b673a3436fe5ded0f9/C in TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9. 2024-11-23T15:26:28,933 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/C/47a5a32e7e27464fb0d2b94c97cee65f, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/C/e0607311fcc54253ad1badf3f9da078e, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/C/3ae4b6f9760843c9842be6633db5a85c] into tmpdir=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp, totalSize=35.8 K 2024-11-23T15:26:28,933 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 47a5a32e7e27464fb0d2b94c97cee65f, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=130, earliestPutTs=1732375584653 2024-11-23T15:26:28,934 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.Compactor(224): Compacting e0607311fcc54253ad1badf3f9da078e, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=153, earliestPutTs=1732375585798 2024-11-23T15:26:28,934 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3ae4b6f9760843c9842be6633db5a85c, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=171, earliestPutTs=1732375586939 2024-11-23T15:26:28,943 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] 
throttle.PressureAwareThroughputController(145): 858dfe5d7eeba9b673a3436fe5ded0f9#C#compaction#364 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T15:26:28,943 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/C/cfce668c75454353a4a9e0a50de7bf9d is 50, key is test_row_0/C:col10/1732375586939/Put/seqid=0 2024-11-23T15:26:28,945 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742255_1431 (size=12561) 2024-11-23T15:26:28,951 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742256_1432 (size=12561) 2024-11-23T15:26:28,952 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/B/922eaca726f04d9b84ebf6951d20449a as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/B/922eaca726f04d9b84ebf6951d20449a 2024-11-23T15:26:28,956 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 858dfe5d7eeba9b673a3436fe5ded0f9/B of 858dfe5d7eeba9b673a3436fe5ded0f9 into 922eaca726f04d9b84ebf6951d20449a(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
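
The recurring "RegionTooBusyException: Over memstore limit=512.0 K" entries mean the region is rejecting new mutations until the in-flight flush drains its memstore below the blocking limit. That limit is derived from the memstore flush size and the block multiplier; the unusually small 512 K value suggests this test run lowers the flush size on purpose. Below is a hedged sketch of reading those two settings and computing the limit, assuming a 128 K flush-size fallback (an assumed test-sized value, not the 128 MB production default and not something read from this log).

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    // Sketch: compute the memstore blocking limit as it is commonly described,
    // flush size * block multiplier. The 128 K fallback below is an assumed
    // test-sized value used only so the arithmetic lands on 512 K.
    public class MemstoreLimitSketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024);
            long multiplier = conf.getLong("hbase.hregion.memstore.block.multiplier", 4L);
            long blockingLimit = flushSize * multiplier; // 512 K with the assumed values
            System.out.println("Blocking memstore limit: " + (blockingLimit / 1024) + " K");
        }
    }

Once the flush visible in the surrounding records completes and the memstore drops back under this limit, the handlers stop throwing and writes resume.
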
2024-11-23T15:26:28,956 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 858dfe5d7eeba9b673a3436fe5ded0f9: 2024-11-23T15:26:28,956 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9., storeName=858dfe5d7eeba9b673a3436fe5ded0f9/B, priority=13, startTime=1732375588909; duration=0sec 2024-11-23T15:26:28,956 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T15:26:28,956 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 858dfe5d7eeba9b673a3436fe5ded0f9:B 2024-11-23T15:26:29,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(8581): Flush requested on 858dfe5d7eeba9b673a3436fe5ded0f9 2024-11-23T15:26:29,208 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 858dfe5d7eeba9b673a3436fe5ded0f9 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-11-23T15:26:29,208 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 858dfe5d7eeba9b673a3436fe5ded0f9, store=A 2024-11-23T15:26:29,208 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:26:29,208 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 858dfe5d7eeba9b673a3436fe5ded0f9, store=B 2024-11-23T15:26:29,208 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:26:29,208 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 858dfe5d7eeba9b673a3436fe5ded0f9, store=C 2024-11-23T15:26:29,208 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:26:29,213 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/A/90ee7e7390eb44db820ed2e5e1fbfe7f is 50, key is test_row_0/A:col10/1732375588083/Put/seqid=0 2024-11-23T15:26:29,218 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742257_1433 (size=14541) 2024-11-23T15:26:29,222 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:29,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55590 deadline: 1732375649217, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:29,223 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:29,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55526 deadline: 1732375649218, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:29,223 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:29,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55540 deadline: 1732375649221, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:29,226 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:29,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55574 deadline: 1732375649222, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:29,227 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:29,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55602 deadline: 1732375649223, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:29,327 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:29,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55590 deadline: 1732375649324, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:29,327 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:29,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55526 deadline: 1732375649324, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:29,328 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:29,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55540 deadline: 1732375649324, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:29,329 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:29,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55574 deadline: 1732375649327, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:29,332 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:29,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55602 deadline: 1732375649328, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:29,356 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/C/cfce668c75454353a4a9e0a50de7bf9d as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/C/cfce668c75454353a4a9e0a50de7bf9d 2024-11-23T15:26:29,360 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 858dfe5d7eeba9b673a3436fe5ded0f9/C of 858dfe5d7eeba9b673a3436fe5ded0f9 into cfce668c75454353a4a9e0a50de7bf9d(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T15:26:29,360 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 858dfe5d7eeba9b673a3436fe5ded0f9: 2024-11-23T15:26:29,360 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9., storeName=858dfe5d7eeba9b673a3436fe5ded0f9/C, priority=13, startTime=1732375588909; duration=0sec 2024-11-23T15:26:29,361 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T15:26:29,361 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 858dfe5d7eeba9b673a3436fe5ded0f9:C 2024-11-23T15:26:29,531 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:29,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55526 deadline: 1732375649529, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:29,532 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:29,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55590 deadline: 1732375649529, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:29,532 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:29,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55540 deadline: 1732375649529, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:29,532 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:29,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55574 deadline: 1732375649530, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:29,536 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:29,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55602 deadline: 1732375649533, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:29,619 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=195 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/A/90ee7e7390eb44db820ed2e5e1fbfe7f 2024-11-23T15:26:29,626 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/B/734e7b4e91ee49c086eda0ceed331315 is 50, key is test_row_0/B:col10/1732375588083/Put/seqid=0 2024-11-23T15:26:29,633 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742258_1434 (size=12151) 2024-11-23T15:26:29,834 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:29,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55526 deadline: 1732375649833, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:29,836 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:29,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55540 deadline: 1732375649834, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:29,837 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:29,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55590 deadline: 1732375649834, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:29,837 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:29,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55574 deadline: 1732375649834, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:29,841 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:29,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55602 deadline: 1732375649839, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:29,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-11-23T15:26:29,893 INFO [Thread-1789 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 132 completed 2024-11-23T15:26:29,894 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-23T15:26:29,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] procedure2.ProcedureExecutor(1098): Stored pid=134, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=134, table=TestAcidGuarantees 2024-11-23T15:26:29,896 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=134, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=134, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-23T15:26:29,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-11-23T15:26:29,896 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=134, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=134, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-23T15:26:29,896 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=135, ppid=134, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-23T15:26:29,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see 
if procedure is done pid=134 2024-11-23T15:26:30,036 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=195 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/B/734e7b4e91ee49c086eda0ceed331315 2024-11-23T15:26:30,043 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/C/e42812cf123549eba58dcd776573c3c9 is 50, key is test_row_0/C:col10/1732375588083/Put/seqid=0 2024-11-23T15:26:30,048 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:26:30,048 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-11-23T15:26:30,048 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9. 2024-11-23T15:26:30,049 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9. as already flushing 2024-11-23T15:26:30,049 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9. 2024-11-23T15:26:30,049 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] handler.RSProcedureHandler(58): pid=135 java.io.IOException: Unable to complete flush {ENCODED => 858dfe5d7eeba9b673a3436fe5ded0f9, NAME => 'TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T15:26:30,049 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=135 java.io.IOException: Unable to complete flush {ENCODED => 858dfe5d7eeba9b673a3436fe5ded0f9, NAME => 'TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:30,049 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742259_1435 (size=12151) 2024-11-23T15:26:30,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=135 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 858dfe5d7eeba9b673a3436fe5ded0f9, NAME => 'TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 858dfe5d7eeba9b673a3436fe5ded0f9, NAME => 'TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:30,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-11-23T15:26:30,201 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:26:30,201 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-11-23T15:26:30,201 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9. 2024-11-23T15:26:30,201 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9. as already flushing 2024-11-23T15:26:30,201 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9. 2024-11-23T15:26:30,201 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] handler.RSProcedureHandler(58): pid=135 java.io.IOException: Unable to complete flush {ENCODED => 858dfe5d7eeba9b673a3436fe5ded0f9, NAME => 'TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T15:26:30,201 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=135 java.io.IOException: Unable to complete flush {ENCODED => 858dfe5d7eeba9b673a3436fe5ded0f9, NAME => 'TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:30,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=135 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 858dfe5d7eeba9b673a3436fe5ded0f9, NAME => 'TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 858dfe5d7eeba9b673a3436fe5ded0f9, NAME => 'TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:30,337 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:30,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55526 deadline: 1732375650335, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:30,341 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:30,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55574 deadline: 1732375650339, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:30,342 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:30,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55590 deadline: 1732375650340, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:30,342 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:30,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55540 deadline: 1732375650340, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:30,346 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:30,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55602 deadline: 1732375650343, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:30,353 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:26:30,354 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-11-23T15:26:30,354 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9. 2024-11-23T15:26:30,354 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9. as already flushing 2024-11-23T15:26:30,354 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9. 2024-11-23T15:26:30,354 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] handler.RSProcedureHandler(58): pid=135 java.io.IOException: Unable to complete flush {ENCODED => 858dfe5d7eeba9b673a3436fe5ded0f9, NAME => 'TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T15:26:30,354 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=135 java.io.IOException: Unable to complete flush {ENCODED => 858dfe5d7eeba9b673a3436fe5ded0f9, NAME => 'TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:30,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=135 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 858dfe5d7eeba9b673a3436fe5ded0f9, NAME => 'TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 858dfe5d7eeba9b673a3436fe5ded0f9, NAME => 'TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:30,448 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=195 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/C/e42812cf123549eba58dcd776573c3c9 2024-11-23T15:26:30,452 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/A/90ee7e7390eb44db820ed2e5e1fbfe7f as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/A/90ee7e7390eb44db820ed2e5e1fbfe7f 2024-11-23T15:26:30,455 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/A/90ee7e7390eb44db820ed2e5e1fbfe7f, entries=200, sequenceid=195, filesize=14.2 K 2024-11-23T15:26:30,456 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/B/734e7b4e91ee49c086eda0ceed331315 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/B/734e7b4e91ee49c086eda0ceed331315 2024-11-23T15:26:30,459 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/B/734e7b4e91ee49c086eda0ceed331315, entries=150, sequenceid=195, filesize=11.9 K 2024-11-23T15:26:30,460 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/C/e42812cf123549eba58dcd776573c3c9 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/C/e42812cf123549eba58dcd776573c3c9 2024-11-23T15:26:30,463 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/C/e42812cf123549eba58dcd776573c3c9, entries=150, sequenceid=195, filesize=11.9 K 2024-11-23T15:26:30,464 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=73.80 KB/75570 for 858dfe5d7eeba9b673a3436fe5ded0f9 in 1256ms, sequenceid=195, compaction requested=false 2024-11-23T15:26:30,464 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 858dfe5d7eeba9b673a3436fe5ded0f9: 2024-11-23T15:26:30,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see 
if procedure is done pid=134 2024-11-23T15:26:30,506 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:26:30,506 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-11-23T15:26:30,506 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9. 2024-11-23T15:26:30,507 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2837): Flushing 858dfe5d7eeba9b673a3436fe5ded0f9 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-11-23T15:26:30,507 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 858dfe5d7eeba9b673a3436fe5ded0f9, store=A 2024-11-23T15:26:30,507 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:26:30,507 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 858dfe5d7eeba9b673a3436fe5ded0f9, store=B 2024-11-23T15:26:30,507 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:26:30,507 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 858dfe5d7eeba9b673a3436fe5ded0f9, store=C 2024-11-23T15:26:30,507 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:26:30,511 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/A/cbde2efee8444b0c9be32d13bfc2b3c0 is 50, key is test_row_0/A:col10/1732375589220/Put/seqid=0 2024-11-23T15:26:30,515 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742260_1436 (size=12151) 2024-11-23T15:26:30,516 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=210 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/A/cbde2efee8444b0c9be32d13bfc2b3c0 2024-11-23T15:26:30,521 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/B/f1b076aee7da443ab4929b1739206f3e is 50, key is test_row_0/B:col10/1732375589220/Put/seqid=0 2024-11-23T15:26:30,525 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742261_1437 (size=12151) 2024-11-23T15:26:30,525 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=210 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/B/f1b076aee7da443ab4929b1739206f3e 2024-11-23T15:26:30,530 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/C/49a3c2cf0c534f60a64f3f42dfde5040 is 50, key is test_row_0/C:col10/1732375589220/Put/seqid=0 2024-11-23T15:26:30,533 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742262_1438 (size=12151) 2024-11-23T15:26:30,934 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=210 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/C/49a3c2cf0c534f60a64f3f42dfde5040 2024-11-23T15:26:30,938 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/A/cbde2efee8444b0c9be32d13bfc2b3c0 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/A/cbde2efee8444b0c9be32d13bfc2b3c0 2024-11-23T15:26:30,941 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/A/cbde2efee8444b0c9be32d13bfc2b3c0, entries=150, sequenceid=210, filesize=11.9 K 2024-11-23T15:26:30,942 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/B/f1b076aee7da443ab4929b1739206f3e as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/B/f1b076aee7da443ab4929b1739206f3e 2024-11-23T15:26:30,945 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/B/f1b076aee7da443ab4929b1739206f3e, entries=150, sequenceid=210, filesize=11.9 K 2024-11-23T15:26:30,945 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/C/49a3c2cf0c534f60a64f3f42dfde5040 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/C/49a3c2cf0c534f60a64f3f42dfde5040 2024-11-23T15:26:30,948 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/C/49a3c2cf0c534f60a64f3f42dfde5040, entries=150, sequenceid=210, filesize=11.9 K 2024-11-23T15:26:30,949 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=0 B/0 for 858dfe5d7eeba9b673a3436fe5ded0f9 in 442ms, sequenceid=210, compaction requested=true 2024-11-23T15:26:30,949 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2538): Flush status journal for 858dfe5d7eeba9b673a3436fe5ded0f9: 2024-11-23T15:26:30,949 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9. 
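The run above is one full memstore flush cycle for region 858dfe5d7eeba9b673a3436fe5ded0f9: each column family's snapshot is written as an HFile under .tmp, committed into the A, B and C store directories, and the region then logs "Finished flush ... compaction requested=true". How often this happens, and when writers get blocked instead, is governed by the standard memstore sizing knobs. The sketch below is illustrative only; the concrete numbers are an assumption chosen so that flush size times block multiplier equals the 512.0 K blocking limit reported further down, not values read from the test's actual configuration.

// Illustrative sketch only: the settings that drive the flush/blocking behaviour
// visible in this log. The values are assumptions, not the test's real config.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemStoreTuningExample {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Flush a region's memstore once it reaches roughly this many bytes.
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
        // Block new writes once the memstore grows past flush.size * multiplier;
        // HRegion.checkResources then rejects mutations with RegionTooBusyException.
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
        long blockingBytes = conf.getLong("hbase.hregion.memstore.flush.size", 0)
            * conf.getInt("hbase.hregion.memstore.block.multiplier", 1);
        System.out.println("blocking limit = " + blockingBytes / 1024.0 + " K"); // 512.0 K
    }
}

Once a region's memstore exceeds that product, HRegion.checkResources rejects incoming mutations with RegionTooBusyException until a flush brings the size back under the limit, which is exactly the pattern the WARN entries later in this log show.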
2024-11-23T15:26:30,949 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=135 2024-11-23T15:26:30,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4106): Remote procedure done, pid=135 2024-11-23T15:26:30,952 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=135, resume processing ppid=134 2024-11-23T15:26:30,952 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=135, ppid=134, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.0540 sec 2024-11-23T15:26:30,953 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=134, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=134, table=TestAcidGuarantees in 1.0580 sec 2024-11-23T15:26:30,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-11-23T15:26:30,999 INFO [Thread-1789 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 134 completed 2024-11-23T15:26:31,000 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-23T15:26:31,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] procedure2.ProcedureExecutor(1098): Stored pid=136, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=136, table=TestAcidGuarantees 2024-11-23T15:26:31,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-11-23T15:26:31,002 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=136, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=136, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-23T15:26:31,002 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=136, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=136, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-23T15:26:31,002 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=137, ppid=136, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-23T15:26:31,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-11-23T15:26:31,154 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:26:31,154 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=137 2024-11-23T15:26:31,154 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9. 
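Each "Client=jenkins//172.17.0.2 flush TestAcidGuarantees" entry corresponds to an Admin-level flush request: the master stores a FlushTableProcedure, fans out one FlushRegionProcedure per region to the region servers, and the client's HBaseAdmin future polls "Checking to see if procedure is done" until the parent procedure finishes. A minimal client-side sketch of issuing such a request is given below; the table name comes from the log, while the connection boilerplate is generic and assumes a reachable cluster configuration on the classpath.

// Minimal sketch of the kind of flush request the test harness issues above.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create(); // picks up hbase-site.xml
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
            // Triggers a FlushTableProcedure on the master, which dispatches
            // FlushRegionProcedure subprocedures to the region servers.
            admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}

When the target region is already flushing, the dispatched FlushRegionCallable reports "Unable to complete flush" (as with pid=139 below) and the master re-dispatches the subprocedure, which eventually succeeds once the region can flush again, as happened for pid=135 above.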
2024-11-23T15:26:31,154 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2538): Flush status journal for 858dfe5d7eeba9b673a3436fe5ded0f9: 2024-11-23T15:26:31,154 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9. 2024-11-23T15:26:31,154 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=137 2024-11-23T15:26:31,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4106): Remote procedure done, pid=137 2024-11-23T15:26:31,157 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=137, resume processing ppid=136 2024-11-23T15:26:31,157 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=137, ppid=136, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 153 msec 2024-11-23T15:26:31,158 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=136, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=136, table=TestAcidGuarantees in 157 msec 2024-11-23T15:26:31,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-11-23T15:26:31,303 INFO [Thread-1789 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 136 completed 2024-11-23T15:26:31,304 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-23T15:26:31,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] procedure2.ProcedureExecutor(1098): Stored pid=138, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=138, table=TestAcidGuarantees 2024-11-23T15:26:31,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-11-23T15:26:31,306 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=138, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=138, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-23T15:26:31,307 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=138, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=138, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-23T15:26:31,307 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=139, ppid=138, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-23T15:26:31,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(8581): Flush requested on 858dfe5d7eeba9b673a3436fe5ded0f9 2024-11-23T15:26:31,355 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 858dfe5d7eeba9b673a3436fe5ded0f9 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-23T15:26:31,355 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
858dfe5d7eeba9b673a3436fe5ded0f9, store=A 2024-11-23T15:26:31,355 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:26:31,355 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 858dfe5d7eeba9b673a3436fe5ded0f9, store=B 2024-11-23T15:26:31,356 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:26:31,356 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 858dfe5d7eeba9b673a3436fe5ded0f9, store=C 2024-11-23T15:26:31,356 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:26:31,360 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/A/1bd4298e7dcb412683acbcef672dd00b is 50, key is test_row_0/A:col10/1732375591351/Put/seqid=0 2024-11-23T15:26:31,364 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742263_1439 (size=14541) 2024-11-23T15:26:31,386 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:31,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55574 deadline: 1732375651380, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:31,387 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:31,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55602 deadline: 1732375651381, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:31,390 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:31,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55590 deadline: 1732375651385, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:31,390 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:31,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55526 deadline: 1732375651385, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:31,392 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:31,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55540 deadline: 1732375651386, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:31,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-11-23T15:26:31,458 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:26:31,458 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-11-23T15:26:31,459 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9. 2024-11-23T15:26:31,459 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9. as already flushing 2024-11-23T15:26:31,459 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9. 2024-11-23T15:26:31,459 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] handler.RSProcedureHandler(58): pid=139 java.io.IOException: Unable to complete flush {ENCODED => 858dfe5d7eeba9b673a3436fe5ded0f9, NAME => 'TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:31,459 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=139 java.io.IOException: Unable to complete flush {ENCODED => 858dfe5d7eeba9b673a3436fe5ded0f9, NAME => 'TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:31,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=139 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 858dfe5d7eeba9b673a3436fe5ded0f9, NAME => 'TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 858dfe5d7eeba9b673a3436fe5ded0f9, NAME => 'TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:31,491 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:31,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55574 deadline: 1732375651488, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:31,491 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:31,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55602 deadline: 1732375651488, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:31,492 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:31,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55590 deadline: 1732375651491, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:31,492 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:31,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55526 deadline: 1732375651491, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:31,497 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:31,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55540 deadline: 1732375651493, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:31,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-11-23T15:26:31,611 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:26:31,612 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-11-23T15:26:31,612 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9. 2024-11-23T15:26:31,612 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9. as already flushing 2024-11-23T15:26:31,612 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9. 2024-11-23T15:26:31,612 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] handler.RSProcedureHandler(58): pid=139 java.io.IOException: Unable to complete flush {ENCODED => 858dfe5d7eeba9b673a3436fe5ded0f9, NAME => 'TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
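The repeated RegionTooBusyException warnings in this stretch are server-side backpressure: HRegion.checkResources rejects each Mutate RPC while the region's memstore is over the 512.0 K blocking limit, and the caller is expected to back off and retry. The sketch below shows what an explicit client-side retry might look like; in practice the HBase client retries these calls internally, so the manual catch-and-sleep is purely illustrative, and the row, families and value are assumptions modelled on the test's key layout (test_row_0, families A/B/C).

// Hedged sketch only: explicit backoff on RegionTooBusyException. The real
// client normally handles these retries itself.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BackoffWriter {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Table table = connection.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
            for (int attempt = 1; ; attempt++) {
                try {
                    table.put(put);               // may be rejected while the memstore is over limit
                    break;
                } catch (RegionTooBusyException e) {
                    if (attempt >= 5) throw e;    // give up after a few tries
                    Thread.sleep(100L * attempt); // simple linear backoff
                }
            }
        }
    }
}

Backing off gives the in-flight flush (MemStoreFlusher.0 above) time to drain the memstore below the blocking threshold before the write is retried.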
2024-11-23T15:26:31,612 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=139 java.io.IOException: Unable to complete flush {ENCODED => 858dfe5d7eeba9b673a3436fe5ded0f9, NAME => 'TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:31,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=139 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 858dfe5d7eeba9b673a3436fe5ded0f9, NAME => 'TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 858dfe5d7eeba9b673a3436fe5ded0f9, NAME => 'TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:31,693 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:31,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55602 deadline: 1732375651692, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:31,697 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:31,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55574 deadline: 1732375651693, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:31,697 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:31,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55526 deadline: 1732375651693, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:31,699 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:31,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55590 deadline: 1732375651694, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:31,699 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:31,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55540 deadline: 1732375651698, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:31,764 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:26:31,765 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-11-23T15:26:31,765 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9. 2024-11-23T15:26:31,765 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9. as already flushing 2024-11-23T15:26:31,765 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=222 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/A/1bd4298e7dcb412683acbcef672dd00b 2024-11-23T15:26:31,765 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9. 2024-11-23T15:26:31,765 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] handler.RSProcedureHandler(58): pid=139 java.io.IOException: Unable to complete flush {ENCODED => 858dfe5d7eeba9b673a3436fe5ded0f9, NAME => 'TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:31,765 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=139 java.io.IOException: Unable to complete flush {ENCODED => 858dfe5d7eeba9b673a3436fe5ded0f9, NAME => 'TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:31,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=139 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 858dfe5d7eeba9b673a3436fe5ded0f9, NAME => 'TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 858dfe5d7eeba9b673a3436fe5ded0f9, NAME => 'TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:31,772 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/B/a19a648b4859434aaea8bcc4f1ea6aca is 50, key is test_row_0/B:col10/1732375591351/Put/seqid=0 2024-11-23T15:26:31,775 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742264_1440 (size=12151) 2024-11-23T15:26:31,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-11-23T15:26:31,917 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:26:31,917 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-11-23T15:26:31,917 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9. 2024-11-23T15:26:31,918 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9. as already flushing 2024-11-23T15:26:31,918 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9. 2024-11-23T15:26:31,918 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] handler.RSProcedureHandler(58): pid=139 java.io.IOException: Unable to complete flush {ENCODED => 858dfe5d7eeba9b673a3436fe5ded0f9, NAME => 'TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:31,918 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=139 java.io.IOException: Unable to complete flush {ENCODED => 858dfe5d7eeba9b673a3436fe5ded0f9, NAME => 'TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:31,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=139 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 858dfe5d7eeba9b673a3436fe5ded0f9, NAME => 'TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 858dfe5d7eeba9b673a3436fe5ded0f9, NAME => 'TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:31,996 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:31,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55602 deadline: 1732375651994, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:32,001 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:32,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55590 deadline: 1732375652000, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:32,002 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:32,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55574 deadline: 1732375652000, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:32,002 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:32,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55526 deadline: 1732375652000, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:32,002 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:32,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55540 deadline: 1732375652001, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:32,070 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:26:32,070 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-11-23T15:26:32,070 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9. 2024-11-23T15:26:32,070 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9. as already flushing 2024-11-23T15:26:32,070 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9. 2024-11-23T15:26:32,070 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] handler.RSProcedureHandler(58): pid=139 java.io.IOException: Unable to complete flush {ENCODED => 858dfe5d7eeba9b673a3436fe5ded0f9, NAME => 'TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T15:26:32,071 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=139 java.io.IOException: Unable to complete flush {ENCODED => 858dfe5d7eeba9b673a3436fe5ded0f9, NAME => 'TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:32,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=139 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 858dfe5d7eeba9b673a3436fe5ded0f9, NAME => 'TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 858dfe5d7eeba9b673a3436fe5ded0f9, NAME => 'TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:32,176 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=222 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/B/a19a648b4859434aaea8bcc4f1ea6aca 2024-11-23T15:26:32,182 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/C/b279310df7944afd918b4078b9803f54 is 50, key is test_row_0/C:col10/1732375591351/Put/seqid=0 2024-11-23T15:26:32,185 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742265_1441 (size=12151) 2024-11-23T15:26:32,186 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=222 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/C/b279310df7944afd918b4078b9803f54 2024-11-23T15:26:32,190 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/A/1bd4298e7dcb412683acbcef672dd00b as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/A/1bd4298e7dcb412683acbcef672dd00b 2024-11-23T15:26:32,195 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/A/1bd4298e7dcb412683acbcef672dd00b, entries=200, sequenceid=222, filesize=14.2 K 2024-11-23T15:26:32,195 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/B/a19a648b4859434aaea8bcc4f1ea6aca as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/B/a19a648b4859434aaea8bcc4f1ea6aca 2024-11-23T15:26:32,198 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/B/a19a648b4859434aaea8bcc4f1ea6aca, entries=150, sequenceid=222, filesize=11.9 K 2024-11-23T15:26:32,199 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/C/b279310df7944afd918b4078b9803f54 as 
hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/C/b279310df7944afd918b4078b9803f54 2024-11-23T15:26:32,202 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/C/b279310df7944afd918b4078b9803f54, entries=150, sequenceid=222, filesize=11.9 K 2024-11-23T15:26:32,203 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 858dfe5d7eeba9b673a3436fe5ded0f9 in 848ms, sequenceid=222, compaction requested=true 2024-11-23T15:26:32,203 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 858dfe5d7eeba9b673a3436fe5ded0f9: 2024-11-23T15:26:32,203 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 858dfe5d7eeba9b673a3436fe5ded0f9:A, priority=-2147483648, current under compaction store size is 1 2024-11-23T15:26:32,203 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T15:26:32,203 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 858dfe5d7eeba9b673a3436fe5ded0f9:B, priority=-2147483648, current under compaction store size is 2 2024-11-23T15:26:32,203 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T15:26:32,203 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-23T15:26:32,203 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-23T15:26:32,203 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 858dfe5d7eeba9b673a3436fe5ded0f9:C, priority=-2147483648, current under compaction store size is 3 2024-11-23T15:26:32,203 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T15:26:32,204 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49014 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-23T15:26:32,204 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 53794 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-23T15:26:32,204 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1540): 858dfe5d7eeba9b673a3436fe5ded0f9/B is initiating minor compaction (all files) 2024-11-23T15:26:32,204 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HStore(1540): 858dfe5d7eeba9b673a3436fe5ded0f9/A is initiating minor compaction (all files) 2024-11-23T15:26:32,204 INFO 
[RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 858dfe5d7eeba9b673a3436fe5ded0f9/B in TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9. 2024-11-23T15:26:32,204 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 858dfe5d7eeba9b673a3436fe5ded0f9/A in TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9. 2024-11-23T15:26:32,204 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/B/922eaca726f04d9b84ebf6951d20449a, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/B/734e7b4e91ee49c086eda0ceed331315, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/B/f1b076aee7da443ab4929b1739206f3e, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/B/a19a648b4859434aaea8bcc4f1ea6aca] into tmpdir=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp, totalSize=47.9 K 2024-11-23T15:26:32,204 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/A/247553ed3dc14159b3dea6b91f5a21c6, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/A/90ee7e7390eb44db820ed2e5e1fbfe7f, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/A/cbde2efee8444b0c9be32d13bfc2b3c0, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/A/1bd4298e7dcb412683acbcef672dd00b] into tmpdir=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp, totalSize=52.5 K 2024-11-23T15:26:32,205 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting 922eaca726f04d9b84ebf6951d20449a, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=171, earliestPutTs=1732375586939 2024-11-23T15:26:32,205 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 247553ed3dc14159b3dea6b91f5a21c6, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=171, earliestPutTs=1732375586939 2024-11-23T15:26:32,205 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting 734e7b4e91ee49c086eda0ceed331315, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=195, earliestPutTs=1732375588081 2024-11-23T15:26:32,205 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 90ee7e7390eb44db820ed2e5e1fbfe7f, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, 
seqNum=195, earliestPutTs=1732375588081 2024-11-23T15:26:32,205 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting f1b076aee7da443ab4929b1739206f3e, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=210, earliestPutTs=1732375589217 2024-11-23T15:26:32,205 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.Compactor(224): Compacting cbde2efee8444b0c9be32d13bfc2b3c0, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=210, earliestPutTs=1732375589217 2024-11-23T15:26:32,206 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting a19a648b4859434aaea8bcc4f1ea6aca, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=222, earliestPutTs=1732375591351 2024-11-23T15:26:32,206 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1bd4298e7dcb412683acbcef672dd00b, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=222, earliestPutTs=1732375591351 2024-11-23T15:26:32,215 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 858dfe5d7eeba9b673a3436fe5ded0f9#A#compaction#374 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-23T15:26:32,216 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/A/cd742b7fe4804538aee8582cb462de71 is 50, key is test_row_0/A:col10/1732375591351/Put/seqid=0 2024-11-23T15:26:32,216 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 858dfe5d7eeba9b673a3436fe5ded0f9#B#compaction#375 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T15:26:32,216 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/B/7bcac5c2b2d84a4e8b9914ff4cae4f62 is 50, key is test_row_0/B:col10/1732375591351/Put/seqid=0 2024-11-23T15:26:32,222 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:26:32,223 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-11-23T15:26:32,223 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9. 
2024-11-23T15:26:32,223 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2837): Flushing 858dfe5d7eeba9b673a3436fe5ded0f9 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-23T15:26:32,223 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 858dfe5d7eeba9b673a3436fe5ded0f9, store=A 2024-11-23T15:26:32,223 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:26:32,224 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 858dfe5d7eeba9b673a3436fe5ded0f9, store=B 2024-11-23T15:26:32,224 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:26:32,224 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 858dfe5d7eeba9b673a3436fe5ded0f9, store=C 2024-11-23T15:26:32,224 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:26:32,230 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742267_1443 (size=12697) 2024-11-23T15:26:32,231 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742266_1442 (size=12697) 2024-11-23T15:26:32,233 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/A/ebb63b9240e14a849d3b39a9ade54416 is 50, key is test_row_0/A:col10/1732375591384/Put/seqid=0 2024-11-23T15:26:32,236 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742268_1444 (size=12151) 2024-11-23T15:26:32,237 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=247 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/A/ebb63b9240e14a849d3b39a9ade54416 2024-11-23T15:26:32,249 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/B/cbcba83f2f5a4a2c9a45e199123883d9 is 50, key is test_row_0/B:col10/1732375591384/Put/seqid=0 2024-11-23T15:26:32,253 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742269_1445 (size=12151) 2024-11-23T15:26:32,254 INFO 
[RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=247 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/B/cbcba83f2f5a4a2c9a45e199123883d9 2024-11-23T15:26:32,260 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/C/d599dfbc15f140db87790d69190c8c3e is 50, key is test_row_0/C:col10/1732375591384/Put/seqid=0 2024-11-23T15:26:32,264 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742270_1446 (size=12151) 2024-11-23T15:26:32,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-11-23T15:26:32,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(8581): Flush requested on 858dfe5d7eeba9b673a3436fe5ded0f9 2024-11-23T15:26:32,504 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9. as already flushing 2024-11-23T15:26:32,513 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:32,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55602 deadline: 1732375652510, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:32,517 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:32,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55590 deadline: 1732375652511, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:32,518 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:32,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55526 deadline: 1732375652512, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:32,518 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:32,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55540 deadline: 1732375652512, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:32,518 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:32,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55574 deadline: 1732375652513, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:32,616 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:32,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55602 deadline: 1732375652614, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:32,622 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:32,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55590 deadline: 1732375652619, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:32,622 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:32,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55526 deadline: 1732375652619, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:32,623 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:32,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55540 deadline: 1732375652619, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:32,623 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:32,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55574 deadline: 1732375652619, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:32,636 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/B/7bcac5c2b2d84a4e8b9914ff4cae4f62 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/B/7bcac5c2b2d84a4e8b9914ff4cae4f62 2024-11-23T15:26:32,636 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/A/cd742b7fe4804538aee8582cb462de71 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/A/cd742b7fe4804538aee8582cb462de71 2024-11-23T15:26:32,641 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 858dfe5d7eeba9b673a3436fe5ded0f9/A of 858dfe5d7eeba9b673a3436fe5ded0f9 into cd742b7fe4804538aee8582cb462de71(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T15:26:32,641 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 858dfe5d7eeba9b673a3436fe5ded0f9/B of 858dfe5d7eeba9b673a3436fe5ded0f9 into 7bcac5c2b2d84a4e8b9914ff4cae4f62(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T15:26:32,641 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 858dfe5d7eeba9b673a3436fe5ded0f9: 2024-11-23T15:26:32,641 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 858dfe5d7eeba9b673a3436fe5ded0f9: 2024-11-23T15:26:32,641 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9., storeName=858dfe5d7eeba9b673a3436fe5ded0f9/A, priority=12, startTime=1732375592203; duration=0sec 2024-11-23T15:26:32,641 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9., storeName=858dfe5d7eeba9b673a3436fe5ded0f9/B, priority=12, startTime=1732375592203; duration=0sec 2024-11-23T15:26:32,641 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T15:26:32,641 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 858dfe5d7eeba9b673a3436fe5ded0f9:A 2024-11-23T15:26:32,641 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T15:26:32,641 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 858dfe5d7eeba9b673a3436fe5ded0f9:B 2024-11-23T15:26:32,641 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-23T15:26:32,642 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49014 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-23T15:26:32,642 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HStore(1540): 858dfe5d7eeba9b673a3436fe5ded0f9/C is initiating minor compaction (all files) 2024-11-23T15:26:32,642 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 858dfe5d7eeba9b673a3436fe5ded0f9/C in TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9. 
2024-11-23T15:26:32,642 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/C/cfce668c75454353a4a9e0a50de7bf9d, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/C/e42812cf123549eba58dcd776573c3c9, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/C/49a3c2cf0c534f60a64f3f42dfde5040, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/C/b279310df7944afd918b4078b9803f54] into tmpdir=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp, totalSize=47.9 K 2024-11-23T15:26:32,643 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.Compactor(224): Compacting cfce668c75454353a4a9e0a50de7bf9d, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=171, earliestPutTs=1732375586939 2024-11-23T15:26:32,643 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.Compactor(224): Compacting e42812cf123549eba58dcd776573c3c9, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=195, earliestPutTs=1732375588081 2024-11-23T15:26:32,643 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 49a3c2cf0c534f60a64f3f42dfde5040, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=210, earliestPutTs=1732375589217 2024-11-23T15:26:32,644 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.Compactor(224): Compacting b279310df7944afd918b4078b9803f54, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=222, earliestPutTs=1732375591351 2024-11-23T15:26:32,651 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 858dfe5d7eeba9b673a3436fe5ded0f9#C#compaction#379 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T15:26:32,651 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/C/1fa23610cf3947acbd56093691c07dbc is 50, key is test_row_0/C:col10/1732375591351/Put/seqid=0 2024-11-23T15:26:32,654 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742271_1447 (size=12697) 2024-11-23T15:26:32,659 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/C/1fa23610cf3947acbd56093691c07dbc as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/C/1fa23610cf3947acbd56093691c07dbc 2024-11-23T15:26:32,662 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 858dfe5d7eeba9b673a3436fe5ded0f9/C of 858dfe5d7eeba9b673a3436fe5ded0f9 into 1fa23610cf3947acbd56093691c07dbc(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T15:26:32,662 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 858dfe5d7eeba9b673a3436fe5ded0f9: 2024-11-23T15:26:32,662 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9., storeName=858dfe5d7eeba9b673a3436fe5ded0f9/C, priority=12, startTime=1732375592203; duration=0sec 2024-11-23T15:26:32,663 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T15:26:32,663 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 858dfe5d7eeba9b673a3436fe5ded0f9:C 2024-11-23T15:26:32,665 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=247 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/C/d599dfbc15f140db87790d69190c8c3e 2024-11-23T15:26:32,668 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/A/ebb63b9240e14a849d3b39a9ade54416 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/A/ebb63b9240e14a849d3b39a9ade54416 2024-11-23T15:26:32,671 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/A/ebb63b9240e14a849d3b39a9ade54416, entries=150, sequenceid=247, filesize=11.9 K 2024-11-23T15:26:32,672 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/B/cbcba83f2f5a4a2c9a45e199123883d9 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/B/cbcba83f2f5a4a2c9a45e199123883d9 2024-11-23T15:26:32,675 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/B/cbcba83f2f5a4a2c9a45e199123883d9, entries=150, sequenceid=247, filesize=11.9 K 2024-11-23T15:26:32,676 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/C/d599dfbc15f140db87790d69190c8c3e as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/C/d599dfbc15f140db87790d69190c8c3e 2024-11-23T15:26:32,680 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/C/d599dfbc15f140db87790d69190c8c3e, entries=150, sequenceid=247, filesize=11.9 K 2024-11-23T15:26:32,681 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 858dfe5d7eeba9b673a3436fe5ded0f9 in 458ms, sequenceid=247, compaction requested=false 2024-11-23T15:26:32,681 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2538): Flush status journal for 858dfe5d7eeba9b673a3436fe5ded0f9: 2024-11-23T15:26:32,681 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9. 
2024-11-23T15:26:32,681 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=139 2024-11-23T15:26:32,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4106): Remote procedure done, pid=139 2024-11-23T15:26:32,683 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=139, resume processing ppid=138 2024-11-23T15:26:32,683 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=139, ppid=138, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.3750 sec 2024-11-23T15:26:32,685 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=138, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=138, table=TestAcidGuarantees in 1.3790 sec 2024-11-23T15:26:32,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(8581): Flush requested on 858dfe5d7eeba9b673a3436fe5ded0f9 2024-11-23T15:26:32,821 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 858dfe5d7eeba9b673a3436fe5ded0f9 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-23T15:26:32,821 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 858dfe5d7eeba9b673a3436fe5ded0f9, store=A 2024-11-23T15:26:32,821 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:26:32,821 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 858dfe5d7eeba9b673a3436fe5ded0f9, store=B 2024-11-23T15:26:32,821 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:26:32,821 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 858dfe5d7eeba9b673a3436fe5ded0f9, store=C 2024-11-23T15:26:32,821 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:26:32,825 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/A/a04169a7c9f24db08fc48fcd04f895e4 is 50, key is test_row_0/A:col10/1732375592512/Put/seqid=0 2024-11-23T15:26:32,830 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742272_1448 (size=12251) 2024-11-23T15:26:32,857 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:32,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55540 deadline: 1732375652849, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:32,861 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:32,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55526 deadline: 1732375652854, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:32,861 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:32,861 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:32,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55574 deadline: 1732375652855, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:32,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55602 deadline: 1732375652856, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:32,861 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:32,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55590 deadline: 1732375652857, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:32,961 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:32,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55540 deadline: 1732375652958, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:32,965 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:32,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55526 deadline: 1732375652962, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:32,966 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:32,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55602 deadline: 1732375652962, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:32,966 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:32,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55574 deadline: 1732375652962, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:32,966 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:32,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55590 deadline: 1732375652962, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:33,165 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:33,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55540 deadline: 1732375653162, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:33,174 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:33,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55602 deadline: 1732375653167, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:33,174 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:33,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55574 deadline: 1732375653167, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:33,174 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:33,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55526 deadline: 1732375653167, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:33,174 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:33,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55590 deadline: 1732375653168, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:33,231 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=262 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/A/a04169a7c9f24db08fc48fcd04f895e4 2024-11-23T15:26:33,238 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/B/de315e42a0ea48e68e3000514aa0272d is 50, key is test_row_0/B:col10/1732375592512/Put/seqid=0 2024-11-23T15:26:33,241 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742273_1449 (size=12251) 2024-11-23T15:26:33,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-11-23T15:26:33,410 INFO [Thread-1789 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 138 completed 2024-11-23T15:26:33,411 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-23T15:26:33,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] procedure2.ProcedureExecutor(1098): Stored pid=140, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=140, table=TestAcidGuarantees 2024-11-23T15:26:33,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 
2024-11-23T15:26:33,413 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=140, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=140, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-23T15:26:33,413 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=140, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=140, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-23T15:26:33,413 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=141, ppid=140, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-23T15:26:33,469 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:33,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55540 deadline: 1732375653467, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:33,478 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:33,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55602 deadline: 1732375653475, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:33,479 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:33,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55574 deadline: 1732375653476, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:33,479 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:33,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55526 deadline: 1732375653477, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:33,479 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:33,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55590 deadline: 1732375653477, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:33,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-11-23T15:26:33,565 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:26:33,565 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=141 2024-11-23T15:26:33,565 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9. 2024-11-23T15:26:33,565 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9. as already flushing 2024-11-23T15:26:33,565 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9. 2024-11-23T15:26:33,565 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] handler.RSProcedureHandler(58): pid=141 java.io.IOException: Unable to complete flush {ENCODED => 858dfe5d7eeba9b673a3436fe5ded0f9, NAME => 'TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
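The pid=140/141 entries are the master-driven flush path: the client's flush request becomes a FlushTableProcedure, whose FlushRegionCallable subprocedure is re-dispatched while the region reports "already flushing" from the earlier request. For reference, a hedged sketch of the client call that produces this sequence, assuming a standard HBase 2.x Admin and default configuration:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Submits a flush of every region of the table (the FlushTableProcedure and
      // "Checking to see if procedure is done" entries above) and returns once the
      // master reports the procedure complete.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}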
2024-11-23T15:26:33,565 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=141 java.io.IOException: Unable to complete flush {ENCODED => 858dfe5d7eeba9b673a3436fe5ded0f9, NAME => 'TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:33,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=141 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 858dfe5d7eeba9b673a3436fe5ded0f9, NAME => 'TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 858dfe5d7eeba9b673a3436fe5ded0f9, NAME => 'TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:33,642 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=262 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/B/de315e42a0ea48e68e3000514aa0272d 2024-11-23T15:26:33,648 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/C/9db6d522012b4dff8b3b518789d27daf is 50, key is test_row_0/C:col10/1732375592512/Put/seqid=0 2024-11-23T15:26:33,652 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742274_1450 (size=12251) 2024-11-23T15:26:33,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-11-23T15:26:33,717 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:26:33,718 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=141 2024-11-23T15:26:33,718 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9. 2024-11-23T15:26:33,718 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9. as already flushing 2024-11-23T15:26:33,718 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9. 2024-11-23T15:26:33,718 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] handler.RSProcedureHandler(58): pid=141 java.io.IOException: Unable to complete flush {ENCODED => 858dfe5d7eeba9b673a3436fe5ded0f9, NAME => 'TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:33,718 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=141 java.io.IOException: Unable to complete flush {ENCODED => 858dfe5d7eeba9b673a3436fe5ded0f9, NAME => 'TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:33,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=141 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 858dfe5d7eeba9b673a3436fe5ded0f9, NAME => 'TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 858dfe5d7eeba9b673a3436fe5ded0f9, NAME => 'TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:33,870 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:26:33,870 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=141 2024-11-23T15:26:33,870 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9. 2024-11-23T15:26:33,870 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9. as already flushing 2024-11-23T15:26:33,870 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9. 2024-11-23T15:26:33,871 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] handler.RSProcedureHandler(58): pid=141 java.io.IOException: Unable to complete flush {ENCODED => 858dfe5d7eeba9b673a3436fe5ded0f9, NAME => 'TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:33,871 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=141 java.io.IOException: Unable to complete flush {ENCODED => 858dfe5d7eeba9b673a3436fe5ded0f9, NAME => 'TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:33,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=141 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 858dfe5d7eeba9b673a3436fe5ded0f9, NAME => 'TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 858dfe5d7eeba9b673a3436fe5ded0f9, NAME => 'TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:33,976 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:33,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55540 deadline: 1732375653973, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:33,981 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:33,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55526 deadline: 1732375653980, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:33,986 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:33,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55602 deadline: 1732375653982, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:33,986 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:33,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55574 deadline: 1732375653984, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:33,986 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:33,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55590 deadline: 1732375653984, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:34,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-11-23T15:26:34,022 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:26:34,023 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=141 2024-11-23T15:26:34,023 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9. 2024-11-23T15:26:34,023 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9. as already flushing 2024-11-23T15:26:34,023 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9. 2024-11-23T15:26:34,023 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] handler.RSProcedureHandler(58): pid=141 java.io.IOException: Unable to complete flush {ENCODED => 858dfe5d7eeba9b673a3436fe5ded0f9, NAME => 'TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
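The 512.0 K figure in each RegionTooBusyException is the region's blocking memstore size, i.e. hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier. A hedged configuration sketch follows; the 128 KB flush size and multiplier of 4 are assumptions chosen to reproduce the logged limit, not values read from this test's setup:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreLimitSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Hypothetical values: a 128 KB per-region flush threshold with the default
    // multiplier of 4 yields the 512.0 K blocking limit seen in the log
    // (blocking limit = flush.size * block.multiplier).
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
    long limit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
        * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    System.out.println("blocking limit = " + limit + " bytes"); // 524288 = 512.0 K
  }
}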
2024-11-23T15:26:34,023 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=141 java.io.IOException: Unable to complete flush {ENCODED => 858dfe5d7eeba9b673a3436fe5ded0f9, NAME => 'TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:34,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=141 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 858dfe5d7eeba9b673a3436fe5ded0f9, NAME => 'TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 858dfe5d7eeba9b673a3436fe5ded0f9, NAME => 'TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:34,053 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=262 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/C/9db6d522012b4dff8b3b518789d27daf 2024-11-23T15:26:34,056 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/A/a04169a7c9f24db08fc48fcd04f895e4 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/A/a04169a7c9f24db08fc48fcd04f895e4 2024-11-23T15:26:34,059 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/A/a04169a7c9f24db08fc48fcd04f895e4, entries=150, sequenceid=262, filesize=12.0 K 2024-11-23T15:26:34,060 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/B/de315e42a0ea48e68e3000514aa0272d as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/B/de315e42a0ea48e68e3000514aa0272d 2024-11-23T15:26:34,070 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/B/de315e42a0ea48e68e3000514aa0272d, entries=150, sequenceid=262, filesize=12.0 K 2024-11-23T15:26:34,070 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/C/9db6d522012b4dff8b3b518789d27daf as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/C/9db6d522012b4dff8b3b518789d27daf 2024-11-23T15:26:34,074 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/C/9db6d522012b4dff8b3b518789d27daf, entries=150, sequenceid=262, filesize=12.0 K 2024-11-23T15:26:34,075 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=147.60 KB/151140 for 858dfe5d7eeba9b673a3436fe5ded0f9 in 1253ms, sequenceid=262, compaction requested=true 2024-11-23T15:26:34,075 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 858dfe5d7eeba9b673a3436fe5ded0f9: 2024-11-23T15:26:34,075 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 
858dfe5d7eeba9b673a3436fe5ded0f9:A, priority=-2147483648, current under compaction store size is 1 2024-11-23T15:26:34,075 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T15:26:34,075 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 858dfe5d7eeba9b673a3436fe5ded0f9:B, priority=-2147483648, current under compaction store size is 2 2024-11-23T15:26:34,075 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T15:26:34,075 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T15:26:34,075 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 858dfe5d7eeba9b673a3436fe5ded0f9:C, priority=-2147483648, current under compaction store size is 3 2024-11-23T15:26:34,075 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T15:26:34,075 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T15:26:34,076 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37099 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T15:26:34,076 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37099 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T15:26:34,076 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HStore(1540): 858dfe5d7eeba9b673a3436fe5ded0f9/A is initiating minor compaction (all files) 2024-11-23T15:26:34,076 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1540): 858dfe5d7eeba9b673a3436fe5ded0f9/B is initiating minor compaction (all files) 2024-11-23T15:26:34,076 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 858dfe5d7eeba9b673a3436fe5ded0f9/A in TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9. 2024-11-23T15:26:34,076 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 858dfe5d7eeba9b673a3436fe5ded0f9/B in TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9. 
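The "Exploring compaction algorithm has selected 3 files ... with 1 in ratio" lines reflect the exploring policy's ratio check over candidate selections. The method below is a simplified, hedged re-implementation of that check, not the actual HBase source: a selection qualifies when no file is larger than the ratio times the combined size of the other files.

import java.util.List;

public class RatioCheckSketch {
  // Simplified shape of the "in ratio" test applied to a candidate selection.
  static boolean filesInRatio(List<Long> fileSizes, double ratio) {
    long total = fileSizes.stream().mapToLong(Long::longValue).sum();
    for (long size : fileSizes) {
      if (size > ratio * (total - size)) {
        return false;
      }
    }
    return true;
  }

  public static void main(String[] args) {
    // Illustrative byte sizes summing to the logged 37099; the individual splits
    // are assumptions approximating the 12.4 K, 11.9 K and 12.0 K store files.
    List<Long> sizes = List.of(12_699L, 12_149L, 12_251L);
    System.out.println(filesInRatio(sizes, 1.2)); // true with the default compaction ratio of 1.2
  }
}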
2024-11-23T15:26:34,076 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/A/cd742b7fe4804538aee8582cb462de71, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/A/ebb63b9240e14a849d3b39a9ade54416, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/A/a04169a7c9f24db08fc48fcd04f895e4] into tmpdir=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp, totalSize=36.2 K 2024-11-23T15:26:34,076 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/B/7bcac5c2b2d84a4e8b9914ff4cae4f62, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/B/cbcba83f2f5a4a2c9a45e199123883d9, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/B/de315e42a0ea48e68e3000514aa0272d] into tmpdir=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp, totalSize=36.2 K 2024-11-23T15:26:34,076 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting 7bcac5c2b2d84a4e8b9914ff4cae4f62, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=222, earliestPutTs=1732375591351 2024-11-23T15:26:34,076 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.Compactor(224): Compacting cd742b7fe4804538aee8582cb462de71, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=222, earliestPutTs=1732375591351 2024-11-23T15:26:34,077 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting cbcba83f2f5a4a2c9a45e199123883d9, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=247, earliestPutTs=1732375591384 2024-11-23T15:26:34,077 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.Compactor(224): Compacting ebb63b9240e14a849d3b39a9ade54416, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=247, earliestPutTs=1732375591384 2024-11-23T15:26:34,077 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.Compactor(224): Compacting a04169a7c9f24db08fc48fcd04f895e4, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=262, earliestPutTs=1732375592510 2024-11-23T15:26:34,077 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting de315e42a0ea48e68e3000514aa0272d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=262, earliestPutTs=1732375592510 2024-11-23T15:26:34,083 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 858dfe5d7eeba9b673a3436fe5ded0f9#A#compaction#383 average throughput is unlimited, slept 0 time(s) and total 
slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-23T15:26:34,084 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 858dfe5d7eeba9b673a3436fe5ded0f9#B#compaction#384 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T15:26:34,084 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/A/8ce1273db6b049499089c9756366e4db is 50, key is test_row_0/A:col10/1732375592512/Put/seqid=0 2024-11-23T15:26:34,085 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/B/be52b0fe066245a5847cf6d7a28ea50c is 50, key is test_row_0/B:col10/1732375592512/Put/seqid=0 2024-11-23T15:26:34,093 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742275_1451 (size=12899) 2024-11-23T15:26:34,097 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742276_1452 (size=12899) 2024-11-23T15:26:34,098 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/B/be52b0fe066245a5847cf6d7a28ea50c as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/B/be52b0fe066245a5847cf6d7a28ea50c 2024-11-23T15:26:34,103 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 858dfe5d7eeba9b673a3436fe5ded0f9/B of 858dfe5d7eeba9b673a3436fe5ded0f9 into be52b0fe066245a5847cf6d7a28ea50c(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
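With the B-store compaction complete, the entries that follow show the CompactSplit bookkeeping (queue status, removal of the under-compaction mark) before the C-store selection starts. A hedged sketch of how the same activity can be requested and observed from a client, assuming a standard 2.x Admin; the polling interval is illustrative:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.CompactionState;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class CompactionStateSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName table = TableName.valueOf("TestAcidGuarantees");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      admin.compact(table);  // asynchronous request; region servers select files as logged above
      CompactionState state;
      do {
        Thread.sleep(500L);                       // illustrative polling interval
        state = admin.getCompactionState(table);  // NONE, MINOR, MAJOR or MAJOR_AND_MINOR
      } while (state != CompactionState.NONE);
    }
  }
}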
2024-11-23T15:26:34,103 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 858dfe5d7eeba9b673a3436fe5ded0f9: 2024-11-23T15:26:34,103 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9., storeName=858dfe5d7eeba9b673a3436fe5ded0f9/B, priority=13, startTime=1732375594075; duration=0sec 2024-11-23T15:26:34,103 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T15:26:34,103 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 858dfe5d7eeba9b673a3436fe5ded0f9:B 2024-11-23T15:26:34,103 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T15:26:34,104 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37099 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T15:26:34,104 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1540): 858dfe5d7eeba9b673a3436fe5ded0f9/C is initiating minor compaction (all files) 2024-11-23T15:26:34,104 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 858dfe5d7eeba9b673a3436fe5ded0f9/C in TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9. 2024-11-23T15:26:34,104 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/C/1fa23610cf3947acbd56093691c07dbc, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/C/d599dfbc15f140db87790d69190c8c3e, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/C/9db6d522012b4dff8b3b518789d27daf] into tmpdir=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp, totalSize=36.2 K 2024-11-23T15:26:34,105 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting 1fa23610cf3947acbd56093691c07dbc, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=222, earliestPutTs=1732375591351 2024-11-23T15:26:34,105 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting d599dfbc15f140db87790d69190c8c3e, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=247, earliestPutTs=1732375591384 2024-11-23T15:26:34,105 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting 9db6d522012b4dff8b3b518789d27daf, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=262, earliestPutTs=1732375592510 2024-11-23T15:26:34,112 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
858dfe5d7eeba9b673a3436fe5ded0f9#C#compaction#385 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T15:26:34,112 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/C/8dcd148d673647d49875d7a48a3db71a is 50, key is test_row_0/C:col10/1732375592512/Put/seqid=0 2024-11-23T15:26:34,118 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742277_1453 (size=12899) 2024-11-23T15:26:34,175 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:26:34,176 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=141 2024-11-23T15:26:34,176 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9. 2024-11-23T15:26:34,176 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2837): Flushing 858dfe5d7eeba9b673a3436fe5ded0f9 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-23T15:26:34,176 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 858dfe5d7eeba9b673a3436fe5ded0f9, store=A 2024-11-23T15:26:34,177 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:26:34,177 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 858dfe5d7eeba9b673a3436fe5ded0f9, store=B 2024-11-23T15:26:34,177 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:26:34,177 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 858dfe5d7eeba9b673a3436fe5ded0f9, store=C 2024-11-23T15:26:34,177 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:26:34,181 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/A/934fb38e92f849e488d98ea140c00076 is 50, key is test_row_0/A:col10/1732375592856/Put/seqid=0 2024-11-23T15:26:34,194 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742278_1454 (size=12301) 2024-11-23T15:26:34,502 DEBUG 
[RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/A/8ce1273db6b049499089c9756366e4db as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/A/8ce1273db6b049499089c9756366e4db 2024-11-23T15:26:34,506 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 858dfe5d7eeba9b673a3436fe5ded0f9/A of 858dfe5d7eeba9b673a3436fe5ded0f9 into 8ce1273db6b049499089c9756366e4db(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T15:26:34,506 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 858dfe5d7eeba9b673a3436fe5ded0f9: 2024-11-23T15:26:34,506 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9., storeName=858dfe5d7eeba9b673a3436fe5ded0f9/A, priority=13, startTime=1732375594075; duration=0sec 2024-11-23T15:26:34,506 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T15:26:34,506 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 858dfe5d7eeba9b673a3436fe5ded0f9:A 2024-11-23T15:26:34,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-11-23T15:26:34,522 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/C/8dcd148d673647d49875d7a48a3db71a as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/C/8dcd148d673647d49875d7a48a3db71a 2024-11-23T15:26:34,526 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 858dfe5d7eeba9b673a3436fe5ded0f9/C of 858dfe5d7eeba9b673a3436fe5ded0f9 into 8dcd148d673647d49875d7a48a3db71a(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T15:26:34,526 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 858dfe5d7eeba9b673a3436fe5ded0f9: 2024-11-23T15:26:34,526 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9., storeName=858dfe5d7eeba9b673a3436fe5ded0f9/C, priority=13, startTime=1732375594075; duration=0sec 2024-11-23T15:26:34,527 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T15:26:34,527 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 858dfe5d7eeba9b673a3436fe5ded0f9:C 2024-11-23T15:26:34,595 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=288 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/A/934fb38e92f849e488d98ea140c00076 2024-11-23T15:26:34,602 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/B/0993cc3639e445568f757bc8445943c7 is 50, key is test_row_0/B:col10/1732375592856/Put/seqid=0 2024-11-23T15:26:34,606 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742279_1455 (size=12301) 2024-11-23T15:26:34,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(8581): Flush requested on 858dfe5d7eeba9b673a3436fe5ded0f9 2024-11-23T15:26:34,986 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9. as already flushing 2024-11-23T15:26:34,997 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:34,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55526 deadline: 1732375654992, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:34,997 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:34,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55602 deadline: 1732375654993, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:34,999 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:34,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55574 deadline: 1732375654995, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:35,002 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:35,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55540 deadline: 1732375654996, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:35,002 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:35,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55590 deadline: 1732375654997, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:35,007 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=288 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/B/0993cc3639e445568f757bc8445943c7 2024-11-23T15:26:35,013 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/C/58be757d453e457bb6edc7c7eb3776d7 is 50, key is test_row_0/C:col10/1732375592856/Put/seqid=0 2024-11-23T15:26:35,017 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742280_1456 (size=12301) 2024-11-23T15:26:35,103 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:35,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55526 deadline: 1732375655098, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:35,103 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:35,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55574 deadline: 1732375655100, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:35,106 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:35,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55540 deadline: 1732375655103, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:35,107 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:35,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55590 deadline: 1732375655103, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:35,306 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:35,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55526 deadline: 1732375655304, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:35,307 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:35,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55574 deadline: 1732375655305, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:35,310 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:35,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55540 deadline: 1732375655307, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:35,311 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:35,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55590 deadline: 1732375655308, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:35,417 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=288 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/C/58be757d453e457bb6edc7c7eb3776d7 2024-11-23T15:26:35,421 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/A/934fb38e92f849e488d98ea140c00076 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/A/934fb38e92f849e488d98ea140c00076 2024-11-23T15:26:35,424 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/A/934fb38e92f849e488d98ea140c00076, entries=150, sequenceid=288, filesize=12.0 K 2024-11-23T15:26:35,425 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/B/0993cc3639e445568f757bc8445943c7 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/B/0993cc3639e445568f757bc8445943c7 2024-11-23T15:26:35,428 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/B/0993cc3639e445568f757bc8445943c7, entries=150, sequenceid=288, filesize=12.0 K 2024-11-23T15:26:35,429 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/C/58be757d453e457bb6edc7c7eb3776d7 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/C/58be757d453e457bb6edc7c7eb3776d7 2024-11-23T15:26:35,432 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/C/58be757d453e457bb6edc7c7eb3776d7, entries=150, sequenceid=288, filesize=12.0 K 2024-11-23T15:26:35,433 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 858dfe5d7eeba9b673a3436fe5ded0f9 in 1257ms, sequenceid=288, compaction requested=false 2024-11-23T15:26:35,433 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2538): Flush status journal for 858dfe5d7eeba9b673a3436fe5ded0f9: 2024-11-23T15:26:35,433 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9. 
2024-11-23T15:26:35,433 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=141 2024-11-23T15:26:35,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4106): Remote procedure done, pid=141 2024-11-23T15:26:35,435 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=141, resume processing ppid=140 2024-11-23T15:26:35,435 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=141, ppid=140, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.0210 sec 2024-11-23T15:26:35,436 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=140, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=140, table=TestAcidGuarantees in 2.0250 sec 2024-11-23T15:26:35,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-11-23T15:26:35,517 INFO [Thread-1789 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 140 completed 2024-11-23T15:26:35,518 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-23T15:26:35,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] procedure2.ProcedureExecutor(1098): Stored pid=142, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=142, table=TestAcidGuarantees 2024-11-23T15:26:35,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-11-23T15:26:35,520 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=142, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=142, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-23T15:26:35,520 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=142, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=142, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-23T15:26:35,520 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=143, ppid=142, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-23T15:26:35,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(8581): Flush requested on 858dfe5d7eeba9b673a3436fe5ded0f9 2024-11-23T15:26:35,610 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 858dfe5d7eeba9b673a3436fe5ded0f9 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-23T15:26:35,611 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 858dfe5d7eeba9b673a3436fe5ded0f9, store=A 2024-11-23T15:26:35,611 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:26:35,611 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 858dfe5d7eeba9b673a3436fe5ded0f9, store=B 2024-11-23T15:26:35,611 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new 
segment=null 2024-11-23T15:26:35,611 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 858dfe5d7eeba9b673a3436fe5ded0f9, store=C 2024-11-23T15:26:35,611 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:26:35,615 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/A/691f1c627908450fb8b63d8b24ca205c is 50, key is test_row_0/A:col10/1732375594995/Put/seqid=0 2024-11-23T15:26:35,619 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742281_1457 (size=14741) 2024-11-23T15:26:35,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-11-23T15:26:35,672 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:35,672 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:26:35,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55526 deadline: 1732375655643, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:35,672 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143 2024-11-23T15:26:35,672 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9. 
2024-11-23T15:26:35,672 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9. as already flushing 2024-11-23T15:26:35,672 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9. 2024-11-23T15:26:35,672 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] handler.RSProcedureHandler(58): pid=143 java.io.IOException: Unable to complete flush {ENCODED => 858dfe5d7eeba9b673a3436fe5ded0f9, NAME => 'TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:35,673 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=143 java.io.IOException: Unable to complete flush {ENCODED => 858dfe5d7eeba9b673a3436fe5ded0f9, NAME => 'TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T15:26:35,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=143 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 858dfe5d7eeba9b673a3436fe5ded0f9, NAME => 'TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 858dfe5d7eeba9b673a3436fe5ded0f9, NAME => 'TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:35,680 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:35,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55540 deadline: 1732375655672, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:35,680 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:35,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55590 deadline: 1732375655672, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:35,680 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:35,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55574 deadline: 1732375655672, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:35,774 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:35,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55526 deadline: 1732375655773, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:35,784 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:35,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55540 deadline: 1732375655781, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:35,784 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:35,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55590 deadline: 1732375655781, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:35,785 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:35,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55574 deadline: 1732375655781, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:35,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-11-23T15:26:35,824 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:26:35,825 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143 2024-11-23T15:26:35,825 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9. 2024-11-23T15:26:35,825 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9. as already flushing 2024-11-23T15:26:35,825 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9. 2024-11-23T15:26:35,825 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] handler.RSProcedureHandler(58): pid=143 java.io.IOException: Unable to complete flush {ENCODED => 858dfe5d7eeba9b673a3436fe5ded0f9, NAME => 'TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T15:26:35,825 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=143 java.io.IOException: Unable to complete flush {ENCODED => 858dfe5d7eeba9b673a3436fe5ded0f9, NAME => 'TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:35,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=143 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 858dfe5d7eeba9b673a3436fe5ded0f9, NAME => 'TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 858dfe5d7eeba9b673a3436fe5ded0f9, NAME => 'TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:35,977 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:26:35,977 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143 2024-11-23T15:26:35,978 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9. 2024-11-23T15:26:35,978 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9. as already flushing 2024-11-23T15:26:35,978 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9. 2024-11-23T15:26:35,978 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] handler.RSProcedureHandler(58): pid=143 java.io.IOException: Unable to complete flush {ENCODED => 858dfe5d7eeba9b673a3436fe5ded0f9, NAME => 'TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:35,978 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=143 java.io.IOException: Unable to complete flush {ENCODED => 858dfe5d7eeba9b673a3436fe5ded0f9, NAME => 'TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:35,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=143 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 858dfe5d7eeba9b673a3436fe5ded0f9, NAME => 'TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 858dfe5d7eeba9b673a3436fe5ded0f9, NAME => 'TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:35,980 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:35,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55526 deadline: 1732375655977, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:35,989 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:35,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55540 deadline: 1732375655986, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:35,990 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:35,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55590 deadline: 1732375655987, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:35,990 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:35,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55574 deadline: 1732375655987, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:36,020 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=302 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/A/691f1c627908450fb8b63d8b24ca205c 2024-11-23T15:26:36,026 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/B/87059ff54398433399e890145257d2c3 is 50, key is test_row_0/B:col10/1732375594995/Put/seqid=0 2024-11-23T15:26:36,033 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742282_1458 (size=12301) 2024-11-23T15:26:36,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-11-23T15:26:36,130 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:26:36,130 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143 2024-11-23T15:26:36,130 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9. 2024-11-23T15:26:36,130 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9. as already flushing 2024-11-23T15:26:36,130 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9. 
2024-11-23T15:26:36,130 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] handler.RSProcedureHandler(58): pid=143 java.io.IOException: Unable to complete flush {ENCODED => 858dfe5d7eeba9b673a3436fe5ded0f9, NAME => 'TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:36,130 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=143 java.io.IOException: Unable to complete flush {ENCODED => 858dfe5d7eeba9b673a3436fe5ded0f9, NAME => 'TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:36,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=143 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 858dfe5d7eeba9b673a3436fe5ded0f9, NAME => 'TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 858dfe5d7eeba9b673a3436fe5ded0f9, NAME => 'TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:36,282 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:26:36,283 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143 2024-11-23T15:26:36,283 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9. 2024-11-23T15:26:36,283 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9. as already flushing 2024-11-23T15:26:36,283 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9. 2024-11-23T15:26:36,283 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] handler.RSProcedureHandler(58): pid=143 java.io.IOException: Unable to complete flush {ENCODED => 858dfe5d7eeba9b673a3436fe5ded0f9, NAME => 'TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:36,283 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=143 java.io.IOException: Unable to complete flush {ENCODED => 858dfe5d7eeba9b673a3436fe5ded0f9, NAME => 'TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:36,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=143 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 858dfe5d7eeba9b673a3436fe5ded0f9, NAME => 'TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 858dfe5d7eeba9b673a3436fe5ded0f9, NAME => 'TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:36,286 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:36,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55526 deadline: 1732375656282, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:36,293 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:36,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55574 deadline: 1732375656291, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:36,293 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:36,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55540 deadline: 1732375656291, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:36,295 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:36,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55590 deadline: 1732375656293, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:36,434 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=302 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/B/87059ff54398433399e890145257d2c3 2024-11-23T15:26:36,435 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:26:36,435 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143 2024-11-23T15:26:36,435 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9. 2024-11-23T15:26:36,435 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9. as already flushing 2024-11-23T15:26:36,436 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9. 2024-11-23T15:26:36,436 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] handler.RSProcedureHandler(58): pid=143 java.io.IOException: Unable to complete flush {ENCODED => 858dfe5d7eeba9b673a3436fe5ded0f9, NAME => 'TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:36,436 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=143 java.io.IOException: Unable to complete flush {ENCODED => 858dfe5d7eeba9b673a3436fe5ded0f9, NAME => 'TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:36,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=143 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 858dfe5d7eeba9b673a3436fe5ded0f9, NAME => 'TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 858dfe5d7eeba9b673a3436fe5ded0f9, NAME => 'TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:36,440 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/C/b9ba42e863864fc7903d17b507533fd4 is 50, key is test_row_0/C:col10/1732375594995/Put/seqid=0 2024-11-23T15:26:36,443 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742283_1459 (size=12301) 2024-11-23T15:26:36,588 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:26:36,588 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143 2024-11-23T15:26:36,588 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9. 2024-11-23T15:26:36,588 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9. as already flushing 2024-11-23T15:26:36,588 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9. 2024-11-23T15:26:36,588 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] handler.RSProcedureHandler(58): pid=143 java.io.IOException: Unable to complete flush {ENCODED => 858dfe5d7eeba9b673a3436fe5ded0f9, NAME => 'TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T15:26:36,588 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=143 java.io.IOException: Unable to complete flush {ENCODED => 858dfe5d7eeba9b673a3436fe5ded0f9, NAME => 'TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:36,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=143 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 858dfe5d7eeba9b673a3436fe5ded0f9, NAME => 'TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 858dfe5d7eeba9b673a3436fe5ded0f9, NAME => 'TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:36,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-11-23T15:26:36,741 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:26:36,741 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143 2024-11-23T15:26:36,741 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9. 2024-11-23T15:26:36,741 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9. as already flushing 2024-11-23T15:26:36,741 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9. 2024-11-23T15:26:36,742 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] handler.RSProcedureHandler(58): pid=143 java.io.IOException: Unable to complete flush {ENCODED => 858dfe5d7eeba9b673a3436fe5ded0f9, NAME => 'TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:36,742 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=143 java.io.IOException: Unable to complete flush {ENCODED => 858dfe5d7eeba9b673a3436fe5ded0f9, NAME => 'TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:36,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=143 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 858dfe5d7eeba9b673a3436fe5ded0f9, NAME => 'TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 858dfe5d7eeba9b673a3436fe5ded0f9, NAME => 'TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:36,793 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:36,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55526 deadline: 1732375656789, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:36,799 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:36,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55574 deadline: 1732375656796, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:36,800 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:36,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55590 deadline: 1732375656797, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:36,800 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:36,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55540 deadline: 1732375656798, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:36,844 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=302 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/C/b9ba42e863864fc7903d17b507533fd4 2024-11-23T15:26:36,848 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/A/691f1c627908450fb8b63d8b24ca205c as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/A/691f1c627908450fb8b63d8b24ca205c 2024-11-23T15:26:36,851 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/A/691f1c627908450fb8b63d8b24ca205c, entries=200, sequenceid=302, filesize=14.4 K 2024-11-23T15:26:36,852 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/B/87059ff54398433399e890145257d2c3 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/B/87059ff54398433399e890145257d2c3 2024-11-23T15:26:36,855 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/B/87059ff54398433399e890145257d2c3, entries=150, sequenceid=302, filesize=12.0 K 2024-11-23T15:26:36,856 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/C/b9ba42e863864fc7903d17b507533fd4 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/C/b9ba42e863864fc7903d17b507533fd4 2024-11-23T15:26:36,859 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/C/b9ba42e863864fc7903d17b507533fd4, entries=150, sequenceid=302, filesize=12.0 K 2024-11-23T15:26:36,860 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=147.60 KB/151140 for 858dfe5d7eeba9b673a3436fe5ded0f9 in 1250ms, sequenceid=302, compaction requested=true 2024-11-23T15:26:36,860 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 858dfe5d7eeba9b673a3436fe5ded0f9: 2024-11-23T15:26:36,860 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 858dfe5d7eeba9b673a3436fe5ded0f9:A, priority=-2147483648, current under compaction store size is 1 2024-11-23T15:26:36,861 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T15:26:36,861 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 858dfe5d7eeba9b673a3436fe5ded0f9:B, priority=-2147483648, current under compaction store size is 2 2024-11-23T15:26:36,861 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T15:26:36,861 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 858dfe5d7eeba9b673a3436fe5ded0f9:C, priority=-2147483648, current under compaction store size is 3 2024-11-23T15:26:36,861 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T15:26:36,861 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T15:26:36,861 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T15:26:36,862 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 39941 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T15:26:36,862 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37501 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T15:26:36,862 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HStore(1540): 858dfe5d7eeba9b673a3436fe5ded0f9/A is initiating minor compaction (all files) 2024-11-23T15:26:36,862 DEBUG 
[RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1540): 858dfe5d7eeba9b673a3436fe5ded0f9/B is initiating minor compaction (all files) 2024-11-23T15:26:36,862 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 858dfe5d7eeba9b673a3436fe5ded0f9/B in TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9. 2024-11-23T15:26:36,862 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 858dfe5d7eeba9b673a3436fe5ded0f9/A in TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9. 2024-11-23T15:26:36,862 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/B/be52b0fe066245a5847cf6d7a28ea50c, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/B/0993cc3639e445568f757bc8445943c7, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/B/87059ff54398433399e890145257d2c3] into tmpdir=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp, totalSize=36.6 K 2024-11-23T15:26:36,862 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/A/8ce1273db6b049499089c9756366e4db, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/A/934fb38e92f849e488d98ea140c00076, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/A/691f1c627908450fb8b63d8b24ca205c] into tmpdir=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp, totalSize=39.0 K 2024-11-23T15:26:36,862 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8ce1273db6b049499089c9756366e4db, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=262, earliestPutTs=1732375592510 2024-11-23T15:26:36,862 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting be52b0fe066245a5847cf6d7a28ea50c, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=262, earliestPutTs=1732375592510 2024-11-23T15:26:36,863 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 934fb38e92f849e488d98ea140c00076, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=288, earliestPutTs=1732375592854 2024-11-23T15:26:36,863 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting 0993cc3639e445568f757bc8445943c7, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=288, earliestPutTs=1732375592854 2024-11-23T15:26:36,863 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 
691f1c627908450fb8b63d8b24ca205c, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=302, earliestPutTs=1732375594994 2024-11-23T15:26:36,863 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting 87059ff54398433399e890145257d2c3, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=302, earliestPutTs=1732375594994 2024-11-23T15:26:36,869 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 858dfe5d7eeba9b673a3436fe5ded0f9#A#compaction#393 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-23T15:26:36,869 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 858dfe5d7eeba9b673a3436fe5ded0f9#B#compaction#392 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T15:26:36,870 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/A/ff21dd142f47410dbbccdfa0b0927eeb is 50, key is test_row_0/A:col10/1732375594995/Put/seqid=0 2024-11-23T15:26:36,870 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/B/2d3e287a32214e4b9bbcee6c9b57ec34 is 50, key is test_row_0/B:col10/1732375594995/Put/seqid=0 2024-11-23T15:26:36,873 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742284_1460 (size=13051) 2024-11-23T15:26:36,876 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742285_1461 (size=13051) 2024-11-23T15:26:36,878 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/A/ff21dd142f47410dbbccdfa0b0927eeb as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/A/ff21dd142f47410dbbccdfa0b0927eeb 2024-11-23T15:26:36,880 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/B/2d3e287a32214e4b9bbcee6c9b57ec34 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/B/2d3e287a32214e4b9bbcee6c9b57ec34 2024-11-23T15:26:36,883 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 858dfe5d7eeba9b673a3436fe5ded0f9/A of 858dfe5d7eeba9b673a3436fe5ded0f9 into ff21dd142f47410dbbccdfa0b0927eeb(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
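Both compaction threads above select all three eligible HFiles per store and are throttled by the PressureAwareThroughputController at a 50 MB/s total limit. A hedged configuration sketch of the knobs behind that selection and throttling; the keys are standard HBase settings, but the concrete values here are illustrative assumptions rather than the test's actual configuration:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionTuningSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Minimum / maximum number of store files considered for a minor
        // compaction. Three eligible files reaching the minimum is why each
        // store above compacts "3 (all) file(s)". Values are illustrative.
        conf.setInt("hbase.hstore.compaction.min", 3);
        conf.setInt("hbase.hstore.compaction.max", 10);
        // Blocking store-file count, matching the "16 blocking" in the
        // selection messages (16 is the stock default).
        conf.setInt("hbase.hstore.blockingStoreFiles", 16);
        // Assumed throughput bounds for the pressure-aware controller; the
        // controller reports a 50 MB/s total limit in this run.
        conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 50L * 1024 * 1024);
        conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
        System.out.println("compaction.min = " + conf.getInt("hbase.hstore.compaction.min", -1));
    }
}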
2024-11-23T15:26:36,883 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 858dfe5d7eeba9b673a3436fe5ded0f9: 2024-11-23T15:26:36,883 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9., storeName=858dfe5d7eeba9b673a3436fe5ded0f9/A, priority=13, startTime=1732375596860; duration=0sec 2024-11-23T15:26:36,883 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T15:26:36,883 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 858dfe5d7eeba9b673a3436fe5ded0f9:A 2024-11-23T15:26:36,883 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T15:26:36,884 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 858dfe5d7eeba9b673a3436fe5ded0f9/B of 858dfe5d7eeba9b673a3436fe5ded0f9 into 2d3e287a32214e4b9bbcee6c9b57ec34(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T15:26:36,884 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 858dfe5d7eeba9b673a3436fe5ded0f9: 2024-11-23T15:26:36,884 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9., storeName=858dfe5d7eeba9b673a3436fe5ded0f9/B, priority=13, startTime=1732375596861; duration=0sec 2024-11-23T15:26:36,884 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T15:26:36,884 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 858dfe5d7eeba9b673a3436fe5ded0f9:B 2024-11-23T15:26:36,884 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37501 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T15:26:36,884 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HStore(1540): 858dfe5d7eeba9b673a3436fe5ded0f9/C is initiating minor compaction (all files) 2024-11-23T15:26:36,884 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 858dfe5d7eeba9b673a3436fe5ded0f9/C in TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9. 
2024-11-23T15:26:36,884 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/C/8dcd148d673647d49875d7a48a3db71a, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/C/58be757d453e457bb6edc7c7eb3776d7, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/C/b9ba42e863864fc7903d17b507533fd4] into tmpdir=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp, totalSize=36.6 K 2024-11-23T15:26:36,885 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8dcd148d673647d49875d7a48a3db71a, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=262, earliestPutTs=1732375592510 2024-11-23T15:26:36,885 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 58be757d453e457bb6edc7c7eb3776d7, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=288, earliestPutTs=1732375592854 2024-11-23T15:26:36,885 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.Compactor(224): Compacting b9ba42e863864fc7903d17b507533fd4, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=302, earliestPutTs=1732375594994 2024-11-23T15:26:36,891 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 858dfe5d7eeba9b673a3436fe5ded0f9#C#compaction#394 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T15:26:36,892 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/C/3d0137ddd9eb4f34944b1e845a303c68 is 50, key is test_row_0/C:col10/1732375594995/Put/seqid=0 2024-11-23T15:26:36,893 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:26:36,894 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143 2024-11-23T15:26:36,894 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9. 
2024-11-23T15:26:36,894 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2837): Flushing 858dfe5d7eeba9b673a3436fe5ded0f9 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-23T15:26:36,894 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 858dfe5d7eeba9b673a3436fe5ded0f9, store=A 2024-11-23T15:26:36,894 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:26:36,894 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 858dfe5d7eeba9b673a3436fe5ded0f9, store=B 2024-11-23T15:26:36,894 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:26:36,894 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 858dfe5d7eeba9b673a3436fe5ded0f9, store=C 2024-11-23T15:26:36,895 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:26:36,907 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/A/d4b97d8bb21e41739ecf4d06d3830111 is 50, key is test_row_0/A:col10/1732375595647/Put/seqid=0 2024-11-23T15:26:36,907 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742286_1462 (size=13051) 2024-11-23T15:26:36,914 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/C/3d0137ddd9eb4f34944b1e845a303c68 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/C/3d0137ddd9eb4f34944b1e845a303c68 2024-11-23T15:26:36,918 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 858dfe5d7eeba9b673a3436fe5ded0f9/C of 858dfe5d7eeba9b673a3436fe5ded0f9 into 3d0137ddd9eb4f34944b1e845a303c68(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
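The repeated RegionTooBusyException entries report "Over memstore limit=512.0 K". That blocking limit is the per-region memstore flush size multiplied by the block multiplier, so the test presumably runs with a deliberately small flush size. A small sketch of the arithmetic under that assumption (the 128 KB flush size is an assumption; a multiplier of 4 is the stock default):

public class MemStoreLimitSketch {
    public static void main(String[] args) {
        // Assumption: the test lowers hbase.hregion.memstore.flush.size to
        // 128 KB; hbase.hregion.memstore.block.multiplier is left at 4.
        long flushSizeBytes = 128L * 1024;
        int blockMultiplier = 4;
        long blockingLimit = flushSizeBytes * blockMultiplier;
        // Writes are rejected with RegionTooBusyException once the region's
        // memstore exceeds this limit, matching "Over memstore limit=512.0 K".
        System.out.println("blocking memstore limit = " + (blockingLimit / 1024) + " K");
    }
}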
2024-11-23T15:26:36,918 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 858dfe5d7eeba9b673a3436fe5ded0f9: 2024-11-23T15:26:36,918 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9., storeName=858dfe5d7eeba9b673a3436fe5ded0f9/C, priority=13, startTime=1732375596861; duration=0sec 2024-11-23T15:26:36,918 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T15:26:36,918 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 858dfe5d7eeba9b673a3436fe5ded0f9:C 2024-11-23T15:26:36,920 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742287_1463 (size=12301) 2024-11-23T15:26:37,006 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9. as already flushing 2024-11-23T15:26:37,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(8581): Flush requested on 858dfe5d7eeba9b673a3436fe5ded0f9 2024-11-23T15:26:37,039 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:37,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55602 deadline: 1732375657036, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:37,142 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:37,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55602 deadline: 1732375657140, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:37,321 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=329 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/A/d4b97d8bb21e41739ecf4d06d3830111 2024-11-23T15:26:37,327 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/B/69ab217805b44608970995114ee1ca71 is 50, key is test_row_0/B:col10/1732375595647/Put/seqid=0 2024-11-23T15:26:37,330 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742288_1464 (size=12301) 2024-11-23T15:26:37,345 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:37,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55602 deadline: 1732375657344, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:37,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-11-23T15:26:37,648 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:37,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55602 deadline: 1732375657646, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:37,731 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=329 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/B/69ab217805b44608970995114ee1ca71 2024-11-23T15:26:37,738 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/C/20081849d650417592144f32d263d9d3 is 50, key is test_row_0/C:col10/1732375595647/Put/seqid=0 2024-11-23T15:26:37,741 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742289_1465 (size=12301) 2024-11-23T15:26:37,742 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=329 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/C/20081849d650417592144f32d263d9d3 2024-11-23T15:26:37,745 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/A/d4b97d8bb21e41739ecf4d06d3830111 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/A/d4b97d8bb21e41739ecf4d06d3830111 2024-11-23T15:26:37,749 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/A/d4b97d8bb21e41739ecf4d06d3830111, entries=150, sequenceid=329, filesize=12.0 K 2024-11-23T15:26:37,750 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/B/69ab217805b44608970995114ee1ca71 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/B/69ab217805b44608970995114ee1ca71 2024-11-23T15:26:37,754 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/B/69ab217805b44608970995114ee1ca71, entries=150, sequenceid=329, filesize=12.0 K 2024-11-23T15:26:37,754 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/C/20081849d650417592144f32d263d9d3 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/C/20081849d650417592144f32d263d9d3 2024-11-23T15:26:37,757 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/C/20081849d650417592144f32d263d9d3, entries=150, sequenceid=329, filesize=12.0 K 2024-11-23T15:26:37,758 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 858dfe5d7eeba9b673a3436fe5ded0f9 in 864ms, sequenceid=329, compaction requested=false 2024-11-23T15:26:37,758 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2538): Flush status journal for 858dfe5d7eeba9b673a3436fe5ded0f9: 2024-11-23T15:26:37,758 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9. 
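Once the flush for pid=143 is committed, the FlushRegionProcedure and its parent FlushTableProcedure finish below, but client Mutate calls keep being rejected with RegionTooBusyException while the memstore stays over its limit. A hedged sketch of a writer that backs off and retries on that exception; the table, row, family, and qualifier names are taken from the log, everything else is illustrative, and the stock HBase client already performs such retries internally:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BackoffWriterSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"))
                .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("v"));
            long backoffMs = 50;
            for (int attempt = 0; attempt < 10; attempt++) {
                try {
                    table.put(put);           // rejected while the memstore is
                    break;                    // over its blocking limit
                } catch (RegionTooBusyException e) {
                    Thread.sleep(backoffMs);  // simple exponential backoff for
                    backoffMs *= 2;           // illustration only
                }
            }
        }
    }
}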
2024-11-23T15:26:37,758 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=143 2024-11-23T15:26:37,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4106): Remote procedure done, pid=143 2024-11-23T15:26:37,761 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=143, resume processing ppid=142 2024-11-23T15:26:37,761 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=143, ppid=142, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.2390 sec 2024-11-23T15:26:37,762 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=142, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=142, table=TestAcidGuarantees in 2.2430 sec 2024-11-23T15:26:37,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(8581): Flush requested on 858dfe5d7eeba9b673a3436fe5ded0f9 2024-11-23T15:26:37,804 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 858dfe5d7eeba9b673a3436fe5ded0f9 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-23T15:26:37,804 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 858dfe5d7eeba9b673a3436fe5ded0f9, store=A 2024-11-23T15:26:37,804 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:26:37,804 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 858dfe5d7eeba9b673a3436fe5ded0f9, store=B 2024-11-23T15:26:37,804 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:26:37,804 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 858dfe5d7eeba9b673a3436fe5ded0f9, store=C 2024-11-23T15:26:37,804 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:26:37,808 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/A/b7cec2ccfba04059a69af380b86cbc69 is 50, key is test_row_0/A:col10/1732375597035/Put/seqid=0 2024-11-23T15:26:37,814 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742290_1466 (size=14741) 2024-11-23T15:26:37,838 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:37,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55526 deadline: 1732375657833, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:37,838 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:37,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55574 deadline: 1732375657834, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:37,839 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:37,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55540 deadline: 1732375657836, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:37,841 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:37,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55590 deadline: 1732375657836, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:37,943 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:37,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55526 deadline: 1732375657939, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:37,943 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:37,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55574 deadline: 1732375657939, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:37,944 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:37,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55540 deadline: 1732375657940, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:37,944 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:37,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55590 deadline: 1732375657942, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:38,147 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:38,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 191 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55526 deadline: 1732375658144, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:38,148 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:38,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55590 deadline: 1732375658145, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:38,148 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:38,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55574 deadline: 1732375658145, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:38,149 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:38,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55540 deadline: 1732375658145, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:38,153 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:38,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55602 deadline: 1732375658152, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:38,215 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=342 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/A/b7cec2ccfba04059a69af380b86cbc69 2024-11-23T15:26:38,222 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/B/4c49c5a4b4c44fb9a419bb6e5eea4961 is 50, key is test_row_0/B:col10/1732375597035/Put/seqid=0 2024-11-23T15:26:38,226 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742291_1467 (size=12301) 2024-11-23T15:26:38,451 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:38,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 193 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55526 deadline: 1732375658448, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:38,453 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:38,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55540 deadline: 1732375658450, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:38,453 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:38,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55590 deadline: 1732375658451, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:38,454 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:38,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55574 deadline: 1732375658451, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:38,626 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=342 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/B/4c49c5a4b4c44fb9a419bb6e5eea4961 2024-11-23T15:26:38,632 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/C/3c4d2e7c562a41a59a2ee9346e396d50 is 50, key is test_row_0/C:col10/1732375597035/Put/seqid=0 2024-11-23T15:26:38,636 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742292_1468 (size=12301) 2024-11-23T15:26:38,636 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=342 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/C/3c4d2e7c562a41a59a2ee9346e396d50 2024-11-23T15:26:38,639 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/A/b7cec2ccfba04059a69af380b86cbc69 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/A/b7cec2ccfba04059a69af380b86cbc69 2024-11-23T15:26:38,649 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/A/b7cec2ccfba04059a69af380b86cbc69, entries=200, sequenceid=342, filesize=14.4 K 2024-11-23T15:26:38,650 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/B/4c49c5a4b4c44fb9a419bb6e5eea4961 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/B/4c49c5a4b4c44fb9a419bb6e5eea4961 2024-11-23T15:26:38,654 INFO [MemStoreFlusher.0 {}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/B/4c49c5a4b4c44fb9a419bb6e5eea4961, entries=150, sequenceid=342, filesize=12.0 K 2024-11-23T15:26:38,655 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/C/3c4d2e7c562a41a59a2ee9346e396d50 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/C/3c4d2e7c562a41a59a2ee9346e396d50 2024-11-23T15:26:38,658 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/C/3c4d2e7c562a41a59a2ee9346e396d50, entries=150, sequenceid=342, filesize=12.0 K 2024-11-23T15:26:38,658 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 858dfe5d7eeba9b673a3436fe5ded0f9 in 855ms, sequenceid=342, compaction requested=true 2024-11-23T15:26:38,658 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 858dfe5d7eeba9b673a3436fe5ded0f9: 2024-11-23T15:26:38,659 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 858dfe5d7eeba9b673a3436fe5ded0f9:A, priority=-2147483648, current under compaction store size is 1 2024-11-23T15:26:38,659 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T15:26:38,659 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 858dfe5d7eeba9b673a3436fe5ded0f9:B, priority=-2147483648, current under compaction store size is 2 2024-11-23T15:26:38,659 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T15:26:38,659 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T15:26:38,659 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T15:26:38,659 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 858dfe5d7eeba9b673a3436fe5ded0f9:C, priority=-2147483648, current under compaction store size is 3 2024-11-23T15:26:38,659 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T15:26:38,660 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40093 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T15:26:38,660 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] 
compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37653 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T15:26:38,660 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HStore(1540): 858dfe5d7eeba9b673a3436fe5ded0f9/A is initiating minor compaction (all files) 2024-11-23T15:26:38,660 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1540): 858dfe5d7eeba9b673a3436fe5ded0f9/B is initiating minor compaction (all files) 2024-11-23T15:26:38,660 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 858dfe5d7eeba9b673a3436fe5ded0f9/A in TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9. 2024-11-23T15:26:38,660 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 858dfe5d7eeba9b673a3436fe5ded0f9/B in TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9. 2024-11-23T15:26:38,660 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/A/ff21dd142f47410dbbccdfa0b0927eeb, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/A/d4b97d8bb21e41739ecf4d06d3830111, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/A/b7cec2ccfba04059a69af380b86cbc69] into tmpdir=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp, totalSize=39.2 K 2024-11-23T15:26:38,660 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/B/2d3e287a32214e4b9bbcee6c9b57ec34, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/B/69ab217805b44608970995114ee1ca71, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/B/4c49c5a4b4c44fb9a419bb6e5eea4961] into tmpdir=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp, totalSize=36.8 K 2024-11-23T15:26:38,660 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.Compactor(224): Compacting ff21dd142f47410dbbccdfa0b0927eeb, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=302, earliestPutTs=1732375594994 2024-11-23T15:26:38,661 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting 2d3e287a32214e4b9bbcee6c9b57ec34, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=302, earliestPutTs=1732375594994 2024-11-23T15:26:38,661 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.Compactor(224): Compacting d4b97d8bb21e41739ecf4d06d3830111, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=329, earliestPutTs=1732375595634 
2024-11-23T15:26:38,661 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting 69ab217805b44608970995114ee1ca71, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=329, earliestPutTs=1732375595634 2024-11-23T15:26:38,661 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.Compactor(224): Compacting b7cec2ccfba04059a69af380b86cbc69, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=342, earliestPutTs=1732375597027 2024-11-23T15:26:38,662 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting 4c49c5a4b4c44fb9a419bb6e5eea4961, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=342, earliestPutTs=1732375597031 2024-11-23T15:26:38,669 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 858dfe5d7eeba9b673a3436fe5ded0f9#A#compaction#401 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T15:26:38,669 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/A/54f24272bafe4bc08943af8886ea289d is 50, key is test_row_0/A:col10/1732375597035/Put/seqid=0 2024-11-23T15:26:38,679 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 858dfe5d7eeba9b673a3436fe5ded0f9#B#compaction#402 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T15:26:38,680 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/B/a324b33c10b94294b537a36e60e83051 is 50, key is test_row_0/B:col10/1732375597035/Put/seqid=0 2024-11-23T15:26:38,682 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742293_1469 (size=13153) 2024-11-23T15:26:38,687 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/A/54f24272bafe4bc08943af8886ea289d as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/A/54f24272bafe4bc08943af8886ea289d 2024-11-23T15:26:38,691 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 858dfe5d7eeba9b673a3436fe5ded0f9/A of 858dfe5d7eeba9b673a3436fe5ded0f9 into 54f24272bafe4bc08943af8886ea289d(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T15:26:38,691 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 858dfe5d7eeba9b673a3436fe5ded0f9: 2024-11-23T15:26:38,691 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9., storeName=858dfe5d7eeba9b673a3436fe5ded0f9/A, priority=13, startTime=1732375598659; duration=0sec 2024-11-23T15:26:38,691 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T15:26:38,691 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 858dfe5d7eeba9b673a3436fe5ded0f9:A 2024-11-23T15:26:38,691 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T15:26:38,692 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37653 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T15:26:38,692 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HStore(1540): 858dfe5d7eeba9b673a3436fe5ded0f9/C is initiating minor compaction (all files) 2024-11-23T15:26:38,692 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 858dfe5d7eeba9b673a3436fe5ded0f9/C in TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9. 2024-11-23T15:26:38,692 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/C/3d0137ddd9eb4f34944b1e845a303c68, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/C/20081849d650417592144f32d263d9d3, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/C/3c4d2e7c562a41a59a2ee9346e396d50] into tmpdir=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp, totalSize=36.8 K 2024-11-23T15:26:38,693 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3d0137ddd9eb4f34944b1e845a303c68, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=302, earliestPutTs=1732375594994 2024-11-23T15:26:38,693 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 20081849d650417592144f32d263d9d3, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=329, earliestPutTs=1732375595634 2024-11-23T15:26:38,693 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3c4d2e7c562a41a59a2ee9346e396d50, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=342, earliestPutTs=1732375597031 2024-11-23T15:26:38,706 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] 
throttle.PressureAwareThroughputController(145): 858dfe5d7eeba9b673a3436fe5ded0f9#C#compaction#403 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T15:26:38,706 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/C/c5203f83e1ec4bb7b0c36bb05a6ba1be is 50, key is test_row_0/C:col10/1732375597035/Put/seqid=0 2024-11-23T15:26:38,708 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742294_1470 (size=13153) 2024-11-23T15:26:38,711 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742295_1471 (size=13153) 2024-11-23T15:26:38,713 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/B/a324b33c10b94294b537a36e60e83051 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/B/a324b33c10b94294b537a36e60e83051 2024-11-23T15:26:38,717 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 858dfe5d7eeba9b673a3436fe5ded0f9/B of 858dfe5d7eeba9b673a3436fe5ded0f9 into a324b33c10b94294b537a36e60e83051(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T15:26:38,717 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 858dfe5d7eeba9b673a3436fe5ded0f9: 2024-11-23T15:26:38,717 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9., storeName=858dfe5d7eeba9b673a3436fe5ded0f9/B, priority=13, startTime=1732375598659; duration=0sec 2024-11-23T15:26:38,717 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T15:26:38,717 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 858dfe5d7eeba9b673a3436fe5ded0f9:B 2024-11-23T15:26:38,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(8581): Flush requested on 858dfe5d7eeba9b673a3436fe5ded0f9 2024-11-23T15:26:38,956 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 858dfe5d7eeba9b673a3436fe5ded0f9 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-23T15:26:38,957 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 858dfe5d7eeba9b673a3436fe5ded0f9, store=A 2024-11-23T15:26:38,957 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:26:38,957 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 858dfe5d7eeba9b673a3436fe5ded0f9, store=B 2024-11-23T15:26:38,957 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:26:38,957 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 858dfe5d7eeba9b673a3436fe5ded0f9, store=C 2024-11-23T15:26:38,957 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:26:38,961 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/A/89794244f2cd4e409cada36dcfcb93b5 is 50, key is test_row_0/A:col10/1732375597835/Put/seqid=0 2024-11-23T15:26:38,965 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742296_1472 (size=14741) 2024-11-23T15:26:38,968 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:38,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55590 deadline: 1732375658964, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:38,970 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:38,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 197 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55526 deadline: 1732375658965, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:38,971 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:38,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 192 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55574 deadline: 1732375658965, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:38,971 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:38,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 192 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55540 deadline: 1732375658968, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:38,978 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=369 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/A/89794244f2cd4e409cada36dcfcb93b5 2024-11-23T15:26:38,985 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/B/68a677eddeaa48d3887fc8db7a7d4307 is 50, key is test_row_0/B:col10/1732375597835/Put/seqid=0 2024-11-23T15:26:38,993 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742297_1473 (size=12301) 2024-11-23T15:26:38,994 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=369 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/B/68a677eddeaa48d3887fc8db7a7d4307 2024-11-23T15:26:39,002 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/C/86745284a80744fba80610d46556f6bd is 50, key is test_row_0/C:col10/1732375597835/Put/seqid=0 2024-11-23T15:26:39,006 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742298_1474 (size=12301) 2024-11-23T15:26:39,073 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:39,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55590 deadline: 1732375659069, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:39,076 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:39,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 199 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55526 deadline: 1732375659072, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:39,077 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:39,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 194 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55574 deadline: 1732375659072, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:39,077 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:39,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 194 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55540 deadline: 1732375659072, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:39,116 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/C/c5203f83e1ec4bb7b0c36bb05a6ba1be as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/C/c5203f83e1ec4bb7b0c36bb05a6ba1be 2024-11-23T15:26:39,120 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 858dfe5d7eeba9b673a3436fe5ded0f9/C of 858dfe5d7eeba9b673a3436fe5ded0f9 into c5203f83e1ec4bb7b0c36bb05a6ba1be(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T15:26:39,120 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 858dfe5d7eeba9b673a3436fe5ded0f9: 2024-11-23T15:26:39,120 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9., storeName=858dfe5d7eeba9b673a3436fe5ded0f9/C, priority=13, startTime=1732375598659; duration=0sec 2024-11-23T15:26:39,120 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T15:26:39,120 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 858dfe5d7eeba9b673a3436fe5ded0f9:C 2024-11-23T15:26:39,155 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:39,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55602 deadline: 1732375659154, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:39,276 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:39,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55590 deadline: 1732375659275, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:39,281 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:39,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 201 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55526 deadline: 1732375659278, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:39,281 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:39,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 196 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55540 deadline: 1732375659278, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:39,282 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:39,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 196 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55574 deadline: 1732375659279, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:39,407 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=369 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/C/86745284a80744fba80610d46556f6bd 2024-11-23T15:26:39,411 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/A/89794244f2cd4e409cada36dcfcb93b5 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/A/89794244f2cd4e409cada36dcfcb93b5 2024-11-23T15:26:39,414 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/A/89794244f2cd4e409cada36dcfcb93b5, entries=200, sequenceid=369, filesize=14.4 K 2024-11-23T15:26:39,415 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/B/68a677eddeaa48d3887fc8db7a7d4307 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/B/68a677eddeaa48d3887fc8db7a7d4307 2024-11-23T15:26:39,418 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/B/68a677eddeaa48d3887fc8db7a7d4307, entries=150, sequenceid=369, filesize=12.0 K 2024-11-23T15:26:39,418 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/C/86745284a80744fba80610d46556f6bd as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/C/86745284a80744fba80610d46556f6bd 2024-11-23T15:26:39,421 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/C/86745284a80744fba80610d46556f6bd, entries=150, sequenceid=369, filesize=12.0 K 2024-11-23T15:26:39,422 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 858dfe5d7eeba9b673a3436fe5ded0f9 in 466ms, sequenceid=369, compaction requested=false 2024-11-23T15:26:39,422 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 858dfe5d7eeba9b673a3436fe5ded0f9: 2024-11-23T15:26:39,469 DEBUG [Thread-1790 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x11193a0c to 127.0.0.1:62881 2024-11-23T15:26:39,469 DEBUG [Thread-1790 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T15:26:39,470 DEBUG [Thread-1796 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x008a917b to 127.0.0.1:62881 2024-11-23T15:26:39,470 DEBUG [Thread-1796 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T15:26:39,470 DEBUG [Thread-1794 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x154f0f85 to 127.0.0.1:62881 2024-11-23T15:26:39,470 DEBUG [Thread-1794 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T15:26:39,472 DEBUG [Thread-1798 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x054c2725 to 127.0.0.1:62881 2024-11-23T15:26:39,472 DEBUG [Thread-1798 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T15:26:39,473 DEBUG [Thread-1792 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7861b162 to 127.0.0.1:62881 2024-11-23T15:26:39,473 DEBUG [Thread-1792 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T15:26:39,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(8581): Flush requested on 858dfe5d7eeba9b673a3436fe5ded0f9 2024-11-23T15:26:39,580 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 858dfe5d7eeba9b673a3436fe5ded0f9 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-23T15:26:39,580 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 858dfe5d7eeba9b673a3436fe5ded0f9, store=A 2024-11-23T15:26:39,580 DEBUG [Thread-1787 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1b8b6e04 to 127.0.0.1:62881 2024-11-23T15:26:39,580 DEBUG [Thread-1787 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T15:26:39,580 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:26:39,580 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactingMemStore(205): FLUSHING TO DISK 858dfe5d7eeba9b673a3436fe5ded0f9, store=B 2024-11-23T15:26:39,580 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:26:39,580 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 858dfe5d7eeba9b673a3436fe5ded0f9, store=C 2024-11-23T15:26:39,580 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:26:39,583 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/A/0f6040b49b414d0ba9c947c1a68e2047 is 50, key is test_row_0/A:col10/1732375598967/Put/seqid=0 2024-11-23T15:26:39,584 DEBUG [Thread-1783 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x032bb71c to 127.0.0.1:62881 2024-11-23T15:26:39,584 DEBUG [Thread-1783 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T15:26:39,585 DEBUG [Thread-1781 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5ef40578 to 127.0.0.1:62881 2024-11-23T15:26:39,585 DEBUG [Thread-1781 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T15:26:39,585 DEBUG [Thread-1779 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x32c12a30 to 127.0.0.1:62881 2024-11-23T15:26:39,585 DEBUG [Thread-1779 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T15:26:39,587 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742299_1475 (size=12301) 2024-11-23T15:26:39,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-11-23T15:26:39,624 INFO [Thread-1789 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 142 completed 2024-11-23T15:26:39,987 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=382 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/A/0f6040b49b414d0ba9c947c1a68e2047 2024-11-23T15:26:39,993 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/B/15dbad73109b4c53b5050aaf794d630d is 50, key is test_row_0/B:col10/1732375598967/Put/seqid=0 2024-11-23T15:26:39,996 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742300_1476 (size=12301) 2024-11-23T15:26:40,397 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=382 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/B/15dbad73109b4c53b5050aaf794d630d 2024-11-23T15:26:40,402 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/C/b6eb419447d0450d8caea0b6f8c281c6 is 50, key is test_row_0/C:col10/1732375598967/Put/seqid=0 2024-11-23T15:26:40,405 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742301_1477 (size=12301) 2024-11-23T15:26:40,806 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=382 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/C/b6eb419447d0450d8caea0b6f8c281c6 2024-11-23T15:26:40,809 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/A/0f6040b49b414d0ba9c947c1a68e2047 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/A/0f6040b49b414d0ba9c947c1a68e2047 2024-11-23T15:26:40,812 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/A/0f6040b49b414d0ba9c947c1a68e2047, entries=150, sequenceid=382, filesize=12.0 K 2024-11-23T15:26:40,812 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/B/15dbad73109b4c53b5050aaf794d630d as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/B/15dbad73109b4c53b5050aaf794d630d 2024-11-23T15:26:40,814 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/B/15dbad73109b4c53b5050aaf794d630d, entries=150, sequenceid=382, filesize=12.0 K 2024-11-23T15:26:40,815 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/C/b6eb419447d0450d8caea0b6f8c281c6 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/C/b6eb419447d0450d8caea0b6f8c281c6 2024-11-23T15:26:40,817 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/C/b6eb419447d0450d8caea0b6f8c281c6, entries=150, sequenceid=382, filesize=12.0 K 2024-11-23T15:26:40,818 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=20.13 KB/20610 for 858dfe5d7eeba9b673a3436fe5ded0f9 in 1238ms, sequenceid=382, compaction requested=true 2024-11-23T15:26:40,818 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush 
status journal for 858dfe5d7eeba9b673a3436fe5ded0f9: 2024-11-23T15:26:40,818 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 858dfe5d7eeba9b673a3436fe5ded0f9:A, priority=-2147483648, current under compaction store size is 1 2024-11-23T15:26:40,818 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T15:26:40,818 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 858dfe5d7eeba9b673a3436fe5ded0f9:B, priority=-2147483648, current under compaction store size is 2 2024-11-23T15:26:40,818 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T15:26:40,818 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T15:26:40,818 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 858dfe5d7eeba9b673a3436fe5ded0f9:C, priority=-2147483648, current under compaction store size is 3 2024-11-23T15:26:40,818 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T15:26:40,818 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T15:26:40,819 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40195 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T15:26:40,819 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37755 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T15:26:40,819 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HStore(1540): 858dfe5d7eeba9b673a3436fe5ded0f9/A is initiating minor compaction (all files) 2024-11-23T15:26:40,819 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1540): 858dfe5d7eeba9b673a3436fe5ded0f9/B is initiating minor compaction (all files) 2024-11-23T15:26:40,819 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 858dfe5d7eeba9b673a3436fe5ded0f9/A in TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9. 2024-11-23T15:26:40,819 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 858dfe5d7eeba9b673a3436fe5ded0f9/B in TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9. 
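
The repeated "RegionTooBusyException: Over memstore limit=512.0 K" warnings above come from HRegion.checkResources(), which rejects new mutations once a region's memstore grows past its blocking threshold, i.e. the configured flush size multiplied by the block multiplier. As a rough, hedged sketch (the exact values this test harness configures are not visible in this log, and the 512.0 K figure implies far smaller settings than the defaults), the threshold can be derived from the standard configuration keys like so:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Sketch: derive the per-region memstore blocking limit from standard HBase settings.
// Defaults are a 128 MB flush size and a 4x multiplier; the run above evidently uses
// much smaller values, since the log reports a 512.0 K limit.
public class MemstoreBlockingLimit {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
        long multiplier = conf.getLong("hbase.hregion.memstore.block.multiplier", 4);
        System.out.println("Writes block once a region's memstore exceeds ~"
            + (flushSize * multiplier) + " bytes");
    }
}
```

Once the MemStoreFlusher entries above finish writing the .tmp files and commit them, the memstore shrinks back under that limit and the blocked Mutate calls can succeed on retry.
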
2024-11-23T15:26:40,819 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/A/54f24272bafe4bc08943af8886ea289d, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/A/89794244f2cd4e409cada36dcfcb93b5, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/A/0f6040b49b414d0ba9c947c1a68e2047] into tmpdir=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp, totalSize=39.3 K 2024-11-23T15:26:40,819 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/B/a324b33c10b94294b537a36e60e83051, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/B/68a677eddeaa48d3887fc8db7a7d4307, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/B/15dbad73109b4c53b5050aaf794d630d] into tmpdir=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp, totalSize=36.9 K 2024-11-23T15:26:40,819 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 54f24272bafe4bc08943af8886ea289d, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=342, earliestPutTs=1732375597031 2024-11-23T15:26:40,819 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting a324b33c10b94294b537a36e60e83051, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=342, earliestPutTs=1732375597031 2024-11-23T15:26:40,820 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 89794244f2cd4e409cada36dcfcb93b5, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=369, earliestPutTs=1732375597833 2024-11-23T15:26:40,820 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting 68a677eddeaa48d3887fc8db7a7d4307, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=369, earliestPutTs=1732375597835 2024-11-23T15:26:40,820 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0f6040b49b414d0ba9c947c1a68e2047, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=382, earliestPutTs=1732375598960 2024-11-23T15:26:40,820 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting 15dbad73109b4c53b5050aaf794d630d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=382, earliestPutTs=1732375598960 2024-11-23T15:26:40,825 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 858dfe5d7eeba9b673a3436fe5ded0f9#B#compaction#410 average throughput is 6.55 MB/second, slept 0 time(s) and 
total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-23T15:26:40,825 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 858dfe5d7eeba9b673a3436fe5ded0f9#A#compaction#411 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T15:26:40,826 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/B/505fd55701194678a8e3eb6b09b54d7e is 50, key is test_row_0/B:col10/1732375598967/Put/seqid=0 2024-11-23T15:26:40,826 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/A/a8f2d83df455483a9f5e191acc31ff26 is 50, key is test_row_0/A:col10/1732375598967/Put/seqid=0 2024-11-23T15:26:40,829 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742302_1478 (size=13255) 2024-11-23T15:26:40,829 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742303_1479 (size=13255) 2024-11-23T15:26:41,162 DEBUG [Thread-1785 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x06bc0f7c to 127.0.0.1:62881 2024-11-23T15:26:41,162 DEBUG [Thread-1785 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T15:26:41,163 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. 
Writers: 2024-11-23T15:26:41,163 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 65 2024-11-23T15:26:41,163 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 65 2024-11-23T15:26:41,163 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 70 2024-11-23T15:26:41,163 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 46 2024-11-23T15:26:41,163 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 54 2024-11-23T15:26:41,163 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-11-23T15:26:41,163 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-11-23T15:26:41,163 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2952 2024-11-23T15:26:41,163 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 8856 rows 2024-11-23T15:26:41,163 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2955 2024-11-23T15:26:41,163 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 8865 rows 2024-11-23T15:26:41,163 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2947 2024-11-23T15:26:41,163 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 8841 rows 2024-11-23T15:26:41,163 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2942 2024-11-23T15:26:41,163 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 8826 rows 2024-11-23T15:26:41,163 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2954 2024-11-23T15:26:41,163 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 8862 rows 2024-11-23T15:26:41,163 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-11-23T15:26:41,163 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x022a6e9f to 127.0.0.1:62881 2024-11-23T15:26:41,163 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T15:26:41,165 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-11-23T15:26:41,165 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-11-23T15:26:41,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] procedure2.ProcedureExecutor(1098): Stored pid=144, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-11-23T15:26:41,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-11-23T15:26:41,171 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732375601171"}]},"ts":"1732375601171"} 2024-11-23T15:26:41,172 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-11-23T15:26:41,174 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-11-23T15:26:41,175 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=145, ppid=144, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-23T15:26:41,176 INFO [PEWorker-2 {}] 
procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=146, ppid=145, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=858dfe5d7eeba9b673a3436fe5ded0f9, UNASSIGN}] 2024-11-23T15:26:41,176 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=146, ppid=145, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=858dfe5d7eeba9b673a3436fe5ded0f9, UNASSIGN 2024-11-23T15:26:41,177 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=146 updating hbase:meta row=858dfe5d7eeba9b673a3436fe5ded0f9, regionState=CLOSING, regionLocation=6a36843bf905,33811,1732375456985 2024-11-23T15:26:41,178 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-23T15:26:41,178 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=147, ppid=146, state=RUNNABLE; CloseRegionProcedure 858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985}] 2024-11-23T15:26:41,233 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/B/505fd55701194678a8e3eb6b09b54d7e as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/B/505fd55701194678a8e3eb6b09b54d7e 2024-11-23T15:26:41,233 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/A/a8f2d83df455483a9f5e191acc31ff26 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/A/a8f2d83df455483a9f5e191acc31ff26 2024-11-23T15:26:41,237 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 858dfe5d7eeba9b673a3436fe5ded0f9/A of 858dfe5d7eeba9b673a3436fe5ded0f9 into a8f2d83df455483a9f5e191acc31ff26(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T15:26:41,237 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 858dfe5d7eeba9b673a3436fe5ded0f9/B of 858dfe5d7eeba9b673a3436fe5ded0f9 into 505fd55701194678a8e3eb6b09b54d7e(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
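
The "wrote 65 / 65 / 70 / 46 / 54" lines in the summary above come from the tool's writer threads, whose puts are the Mutate calls that intermittently hit the busy region earlier in this run. Below is a minimal, hedged sketch of such a writer against the public client API; the row key, families, and retry settings are illustrative and this is not the tool's actual implementation. The client typically retries RegionTooBusyException on its own, bounded by hbase.client.retries.number and hbase.client.pause.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

// Illustrative writer loop in the spirit of the test's writer threads (not the tool's code).
public class SampleWriter {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        conf.setInt("hbase.client.retries.number", 10); // bounds the client's internal retries
        conf.setLong("hbase.client.pause", 100);        // base pause between retries, in ms
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            for (int i = 0; i < 100; i++) {
                // Write the same value to all three families so readers see a consistent row,
                // which is what the ACID-guarantees check relies on.
                byte[] value = Bytes.toBytes("value-" + i);
                Put put = new Put(Bytes.toBytes("test_row_0"));
                put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), value);
                put.addColumn(Bytes.toBytes("B"), Bytes.toBytes("col10"), value);
                put.addColumn(Bytes.toBytes("C"), Bytes.toBytes("col10"), value);
                table.put(put); // may still fail if the region stays over its memstore limit past the retry budget
            }
        }
    }
}
```
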
2024-11-23T15:26:41,237 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 858dfe5d7eeba9b673a3436fe5ded0f9: 2024-11-23T15:26:41,237 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 858dfe5d7eeba9b673a3436fe5ded0f9: 2024-11-23T15:26:41,237 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9., storeName=858dfe5d7eeba9b673a3436fe5ded0f9/B, priority=13, startTime=1732375600818; duration=0sec 2024-11-23T15:26:41,237 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9., storeName=858dfe5d7eeba9b673a3436fe5ded0f9/A, priority=13, startTime=1732375600818; duration=0sec 2024-11-23T15:26:41,237 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T15:26:41,237 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T15:26:41,237 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 858dfe5d7eeba9b673a3436fe5ded0f9:B 2024-11-23T15:26:41,237 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 858dfe5d7eeba9b673a3436fe5ded0f9:A 2024-11-23T15:26:41,237 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T15:26:41,238 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37755 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T15:26:41,238 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1540): 858dfe5d7eeba9b673a3436fe5ded0f9/C is initiating minor compaction (all files) 2024-11-23T15:26:41,238 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 858dfe5d7eeba9b673a3436fe5ded0f9/C in TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9. 
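
The SortedCompactionPolicy / ExploringCompactionPolicy lines above show why each store keeps collapsing back to a single ~12.9 K file: once a flush leaves three eligible HFiles, the policy accepts the candidate set whose file sizes are within the configured ratio and rewrites them in one minor compaction. The snippet below is a deliberately simplified, self-contained illustration of that size-ratio check, not the actual HBase policy code; the 1.2 ratio is the usual default for hbase.hstore.compaction.ratio, and the file sizes are taken from the "3 files of size 37755" selection logged for store B.

```java
import java.util.List;

// Simplified illustration of ratio-based minor-compaction selection (NOT the real
// ExploringCompactionPolicy): a candidate set qualifies if no single file is larger
// than the combined size of the other files times the ratio.
public class RatioSelectionSketch {
    static boolean qualifies(List<Long> sizes, double ratio) {
        long total = sizes.stream().mapToLong(Long::longValue).sum();
        for (long size : sizes) {
            if (size > (total - size) * ratio) {
                return false;
            }
        }
        return true;
    }

    public static void main(String[] args) {
        // Roughly the three B-store files from the log (12.8 K + 12.0 K + 12.0 K = 37755 bytes).
        List<Long> candidate = List.of(13_153L, 12_301L, 12_301L);
        System.out.println("Select for compaction? " + qualifies(candidate, 1.2)); // prints true
    }
}
```
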
2024-11-23T15:26:41,238 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/C/c5203f83e1ec4bb7b0c36bb05a6ba1be, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/C/86745284a80744fba80610d46556f6bd, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/C/b6eb419447d0450d8caea0b6f8c281c6] into tmpdir=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp, totalSize=36.9 K 2024-11-23T15:26:41,238 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting c5203f83e1ec4bb7b0c36bb05a6ba1be, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=342, earliestPutTs=1732375597031 2024-11-23T15:26:41,238 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting 86745284a80744fba80610d46556f6bd, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=369, earliestPutTs=1732375597835 2024-11-23T15:26:41,239 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting b6eb419447d0450d8caea0b6f8c281c6, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=382, earliestPutTs=1732375598960 2024-11-23T15:26:41,244 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 858dfe5d7eeba9b673a3436fe5ded0f9#C#compaction#412 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T15:26:41,244 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/C/704af4463d8940138a73466f07728e64 is 50, key is test_row_0/C:col10/1732375598967/Put/seqid=0 2024-11-23T15:26:41,247 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742304_1480 (size=13255) 2024-11-23T15:26:41,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-11-23T15:26:41,329 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:26:41,329 INFO [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] handler.UnassignRegionHandler(124): Close 858dfe5d7eeba9b673a3436fe5ded0f9 2024-11-23T15:26:41,329 DEBUG [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-23T15:26:41,329 DEBUG [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] regionserver.HRegion(1681): Closing 858dfe5d7eeba9b673a3436fe5ded0f9, disabling compactions & flushes 2024-11-23T15:26:41,329 DEBUG [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] regionserver.HRegion(1942): waiting for 1 compactions to complete for region TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9. 2024-11-23T15:26:41,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-11-23T15:26:41,652 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/C/704af4463d8940138a73466f07728e64 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/C/704af4463d8940138a73466f07728e64 2024-11-23T15:26:41,655 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 858dfe5d7eeba9b673a3436fe5ded0f9/C of 858dfe5d7eeba9b673a3436fe5ded0f9 into 704af4463d8940138a73466f07728e64(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
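
The PressureAwareThroughputController lines report how fast each compaction wrote relative to the region server's configured cap (6.55 MB/second observed against the 50.00 MB/second limit echoed here, so no sleeping was needed). Below is a hedged, simplified sketch of that style of rate limiting; it only illustrates the sleep-to-match-a-byte-budget idea and is not the controller's actual implementation.

```java
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;

// Simplified sketch of throughput-limited copying: after each chunk, sleep long enough
// that the average write rate stays at or below the configured bytes-per-second budget.
// An illustration of the idea behind PressureAwareThroughputController, not its code.
public class ThrottledCopy {
    public static void copy(InputStream in, OutputStream out, long bytesPerSecond)
            throws IOException, InterruptedException {
        byte[] buf = new byte[64 * 1024];
        long copied = 0;
        long start = System.nanoTime();
        int n;
        while ((n = in.read(buf)) != -1) {
            out.write(buf, 0, n);
            copied += n;
            long elapsedMs = (System.nanoTime() - start) / 1_000_000;
            long earliestAllowedMs = copied * 1000 / bytesPerSecond;
            if (earliestAllowedMs > elapsedMs) {
                Thread.sleep(earliestAllowedMs - elapsedMs); // corresponds to the "slept N time(s)" counter
            }
        }
    }
}
```
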
2024-11-23T15:26:41,655 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 858dfe5d7eeba9b673a3436fe5ded0f9: 2024-11-23T15:26:41,655 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9., storeName=858dfe5d7eeba9b673a3436fe5ded0f9/C, priority=13, startTime=1732375600818; duration=0sec 2024-11-23T15:26:41,655 INFO [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9. 2024-11-23T15:26:41,655 DEBUG [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9. 2024-11-23T15:26:41,655 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T15:26:41,655 DEBUG [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9. after waiting 0 ms 2024-11-23T15:26:41,655 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 858dfe5d7eeba9b673a3436fe5ded0f9:C 2024-11-23T15:26:41,655 DEBUG [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9. 
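
At this point the DisableTableProcedure chain (pid=144 -> 145 -> 146 -> 147 above) has reached the region server, which disables updates and, as the next entries show, flushes the remaining ~26.84 KB of memstore before the region can finish closing. From the client side the whole sequence is driven by the Admin call the test issued; a hedged sketch using the public Admin API is below. The table name is taken from the log, and the explicit flush is optional since closing a region flushes any remaining memstore anyway, as the log itself demonstrates.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

// Sketch of the client-side teardown that drives the DisableTableProcedure seen in the log.
public class DisableTable {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        TableName table = TableName.valueOf("TestAcidGuarantees");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            admin.flush(table);        // optional: the close path flushes remaining memstore anyway
            admin.disableTable(table); // master schedules the procedure chain that closes each region
            // admin.deleteTable(table) would typically follow in a test teardown.
        }
    }
}
```
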
2024-11-23T15:26:41,656 INFO [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] regionserver.HRegion(2837): Flushing 858dfe5d7eeba9b673a3436fe5ded0f9 3/3 column families, dataSize=26.84 KB heapSize=71.06 KB 2024-11-23T15:26:41,656 DEBUG [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 858dfe5d7eeba9b673a3436fe5ded0f9, store=A 2024-11-23T15:26:41,656 DEBUG [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:26:41,656 DEBUG [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 858dfe5d7eeba9b673a3436fe5ded0f9, store=B 2024-11-23T15:26:41,656 DEBUG [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:26:41,656 DEBUG [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 858dfe5d7eeba9b673a3436fe5ded0f9, store=C 2024-11-23T15:26:41,656 DEBUG [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:26:41,659 DEBUG [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/A/df669a23c0514d29aa9dcfd0784780ac is 50, key is test_row_0/A:col10/1732375601161/Put/seqid=0 2024-11-23T15:26:41,662 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742305_1481 (size=12301) 2024-11-23T15:26:41,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-11-23T15:26:42,063 INFO [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=392 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/A/df669a23c0514d29aa9dcfd0784780ac 2024-11-23T15:26:42,068 DEBUG [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/B/2e81f42d44564f1b80d750334878b795 is 50, key is test_row_0/B:col10/1732375601161/Put/seqid=0 2024-11-23T15:26:42,071 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742306_1482 (size=12301) 2024-11-23T15:26:42,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-11-23T15:26:42,472 INFO [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 
{event_type=M_RS_CLOSE_REGION, pid=147}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=392 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/B/2e81f42d44564f1b80d750334878b795 2024-11-23T15:26:42,477 DEBUG [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/C/0c2f4c65c36d4c168ec277097b734c54 is 50, key is test_row_0/C:col10/1732375601161/Put/seqid=0 2024-11-23T15:26:42,480 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742307_1483 (size=12301) 2024-11-23T15:26:42,881 INFO [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=392 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/C/0c2f4c65c36d4c168ec277097b734c54 2024-11-23T15:26:42,884 DEBUG [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/A/df669a23c0514d29aa9dcfd0784780ac as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/A/df669a23c0514d29aa9dcfd0784780ac 2024-11-23T15:26:42,887 INFO [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/A/df669a23c0514d29aa9dcfd0784780ac, entries=150, sequenceid=392, filesize=12.0 K 2024-11-23T15:26:42,887 DEBUG [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/B/2e81f42d44564f1b80d750334878b795 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/B/2e81f42d44564f1b80d750334878b795 2024-11-23T15:26:42,889 INFO [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/B/2e81f42d44564f1b80d750334878b795, entries=150, sequenceid=392, filesize=12.0 K 2024-11-23T15:26:42,890 DEBUG [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/.tmp/C/0c2f4c65c36d4c168ec277097b734c54 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/C/0c2f4c65c36d4c168ec277097b734c54 2024-11-23T15:26:42,892 INFO [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/C/0c2f4c65c36d4c168ec277097b734c54, entries=150, sequenceid=392, filesize=12.0 K 2024-11-23T15:26:42,893 INFO [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] regionserver.HRegion(3040): Finished flush of dataSize ~26.84 KB/27480, heapSize ~71.02 KB/72720, currentSize=0 B/0 for 858dfe5d7eeba9b673a3436fe5ded0f9 in 1238ms, sequenceid=392, compaction requested=false 2024-11-23T15:26:42,893 DEBUG [StoreCloser-TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/A/ff2c5e7784d94e57ac12e1d97bd83886, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/A/542944b52b0b40cf9bc4b170149f00d4, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/A/12459cb25b49437c8997285d93f9e0bf, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/A/026dea753d9c40209b2b03bd48b7f17b, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/A/cae2aa62960649fb9499fae5d63c839c, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/A/5e1cbcddc952453eb77647dc674051fe, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/A/add718e2257e42608430ff38041c5382, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/A/867412d9e2cc4254bccc6fa1eec3924b, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/A/a8a0ad8e14a14c1bbc2274819281f29a, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/A/96716b0b792149e9ba02169cc641d5b0, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/A/baf4a0b4e85d499cb5696ada0bde39bd, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/A/5e52b12c27ab471aa38aa6274e1fefa2, 
hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/A/247553ed3dc14159b3dea6b91f5a21c6, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/A/90ee7e7390eb44db820ed2e5e1fbfe7f, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/A/cbde2efee8444b0c9be32d13bfc2b3c0, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/A/1bd4298e7dcb412683acbcef672dd00b, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/A/cd742b7fe4804538aee8582cb462de71, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/A/ebb63b9240e14a849d3b39a9ade54416, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/A/8ce1273db6b049499089c9756366e4db, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/A/a04169a7c9f24db08fc48fcd04f895e4, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/A/934fb38e92f849e488d98ea140c00076, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/A/691f1c627908450fb8b63d8b24ca205c, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/A/ff21dd142f47410dbbccdfa0b0927eeb, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/A/d4b97d8bb21e41739ecf4d06d3830111, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/A/b7cec2ccfba04059a69af380b86cbc69, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/A/54f24272bafe4bc08943af8886ea289d, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/A/89794244f2cd4e409cada36dcfcb93b5, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/A/0f6040b49b414d0ba9c947c1a68e2047] to archive 2024-11-23T15:26:42,894 DEBUG [StoreCloser-TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
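
Each "Archived from FileableStoreFile" pair that follows moves a compacted store file out of the region's data directory into the cluster archive, keeping the same namespace/table/region/family/file suffix: data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/A/... becomes archive/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/A/..., and the same mapping applies to the B and C store files archived further down. A minimal sketch of that source-to-destination mapping is given below, using a local java.nio.file layout as a stand-in for the hdfs://localhost:40979/... root in the log; the method and class names are invented for this sketch and are not the HFileArchiver implementation.

    import java.nio.file.Path;
    import java.nio.file.Paths;

    // Sketch only: mirrors the source -> destination pairs in the archiver entries above,
    // where everything under <root>/data/... is moved to <root>/archive/data/... with the
    // same table/region/family/file suffix.
    public class ArchivePathSketch {
        static Path archiveDestination(Path rootDir, Path storeFile) {
            Path relative = rootDir.resolve("data").relativize(storeFile);
            return rootDir.resolve("archive").resolve("data").resolve(relative);
        }

        public static void main(String[] args) {
            // Local stand-in for the hdfs://localhost:40979/user/jenkins/test-data/... root in the log.
            Path root = Paths.get("/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704");
            Path storeFile = root.resolve(
                "data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/A/ff2c5e7784d94e57ac12e1d97bd83886");
            // Prints the corresponding .../archive/data/default/TestAcidGuarantees/... path,
            // matching the destination recorded for this file in the log.
            System.out.println(archiveDestination(root, storeFile));
        }
    }
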
2024-11-23T15:26:42,895 DEBUG [StoreCloser-TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/A/ff2c5e7784d94e57ac12e1d97bd83886 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/A/ff2c5e7784d94e57ac12e1d97bd83886 2024-11-23T15:26:42,896 DEBUG [StoreCloser-TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/A/542944b52b0b40cf9bc4b170149f00d4 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/A/542944b52b0b40cf9bc4b170149f00d4 2024-11-23T15:26:42,897 DEBUG [StoreCloser-TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/A/12459cb25b49437c8997285d93f9e0bf to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/A/12459cb25b49437c8997285d93f9e0bf 2024-11-23T15:26:42,897 DEBUG [StoreCloser-TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/A/026dea753d9c40209b2b03bd48b7f17b to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/A/026dea753d9c40209b2b03bd48b7f17b 2024-11-23T15:26:42,898 DEBUG [StoreCloser-TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/A/cae2aa62960649fb9499fae5d63c839c to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/A/cae2aa62960649fb9499fae5d63c839c 2024-11-23T15:26:42,899 DEBUG [StoreCloser-TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/A/5e1cbcddc952453eb77647dc674051fe to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/A/5e1cbcddc952453eb77647dc674051fe 2024-11-23T15:26:42,899 DEBUG [StoreCloser-TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/A/add718e2257e42608430ff38041c5382 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/A/add718e2257e42608430ff38041c5382 2024-11-23T15:26:42,900 DEBUG [StoreCloser-TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/A/867412d9e2cc4254bccc6fa1eec3924b to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/A/867412d9e2cc4254bccc6fa1eec3924b 2024-11-23T15:26:42,901 DEBUG [StoreCloser-TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/A/a8a0ad8e14a14c1bbc2274819281f29a to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/A/a8a0ad8e14a14c1bbc2274819281f29a 2024-11-23T15:26:42,902 DEBUG [StoreCloser-TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/A/96716b0b792149e9ba02169cc641d5b0 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/A/96716b0b792149e9ba02169cc641d5b0 2024-11-23T15:26:42,902 DEBUG [StoreCloser-TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/A/baf4a0b4e85d499cb5696ada0bde39bd to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/A/baf4a0b4e85d499cb5696ada0bde39bd 2024-11-23T15:26:42,903 DEBUG [StoreCloser-TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/A/5e52b12c27ab471aa38aa6274e1fefa2 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/A/5e52b12c27ab471aa38aa6274e1fefa2 2024-11-23T15:26:42,904 DEBUG [StoreCloser-TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/A/247553ed3dc14159b3dea6b91f5a21c6 to 
hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/A/247553ed3dc14159b3dea6b91f5a21c6 2024-11-23T15:26:42,905 DEBUG [StoreCloser-TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/A/90ee7e7390eb44db820ed2e5e1fbfe7f to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/A/90ee7e7390eb44db820ed2e5e1fbfe7f 2024-11-23T15:26:42,906 DEBUG [StoreCloser-TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/A/cbde2efee8444b0c9be32d13bfc2b3c0 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/A/cbde2efee8444b0c9be32d13bfc2b3c0 2024-11-23T15:26:42,906 DEBUG [StoreCloser-TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/A/1bd4298e7dcb412683acbcef672dd00b to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/A/1bd4298e7dcb412683acbcef672dd00b 2024-11-23T15:26:42,907 DEBUG [StoreCloser-TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/A/cd742b7fe4804538aee8582cb462de71 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/A/cd742b7fe4804538aee8582cb462de71 2024-11-23T15:26:42,908 DEBUG [StoreCloser-TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/A/ebb63b9240e14a849d3b39a9ade54416 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/A/ebb63b9240e14a849d3b39a9ade54416 2024-11-23T15:26:42,909 DEBUG [StoreCloser-TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/A/8ce1273db6b049499089c9756366e4db to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/A/8ce1273db6b049499089c9756366e4db 2024-11-23T15:26:42,910 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/A/a04169a7c9f24db08fc48fcd04f895e4 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/A/a04169a7c9f24db08fc48fcd04f895e4 2024-11-23T15:26:42,911 DEBUG [StoreCloser-TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/A/934fb38e92f849e488d98ea140c00076 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/A/934fb38e92f849e488d98ea140c00076 2024-11-23T15:26:42,912 DEBUG [StoreCloser-TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/A/691f1c627908450fb8b63d8b24ca205c to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/A/691f1c627908450fb8b63d8b24ca205c 2024-11-23T15:26:42,913 DEBUG [StoreCloser-TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/A/ff21dd142f47410dbbccdfa0b0927eeb to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/A/ff21dd142f47410dbbccdfa0b0927eeb 2024-11-23T15:26:42,913 DEBUG [StoreCloser-TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/A/d4b97d8bb21e41739ecf4d06d3830111 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/A/d4b97d8bb21e41739ecf4d06d3830111 2024-11-23T15:26:42,914 DEBUG [StoreCloser-TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/A/b7cec2ccfba04059a69af380b86cbc69 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/A/b7cec2ccfba04059a69af380b86cbc69 2024-11-23T15:26:42,915 DEBUG [StoreCloser-TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/A/54f24272bafe4bc08943af8886ea289d to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/A/54f24272bafe4bc08943af8886ea289d 2024-11-23T15:26:42,916 DEBUG [StoreCloser-TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/A/89794244f2cd4e409cada36dcfcb93b5 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/A/89794244f2cd4e409cada36dcfcb93b5 2024-11-23T15:26:42,917 DEBUG [StoreCloser-TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/A/0f6040b49b414d0ba9c947c1a68e2047 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/A/0f6040b49b414d0ba9c947c1a68e2047 2024-11-23T15:26:42,918 DEBUG [StoreCloser-TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/B/64f1198f2dbd4d64a111e6976be76e71, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/B/f4acc1b2c855428496a4719f2e813f24, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/B/dcf381a2d27949cfaad48bdb44ec46de, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/B/1d5bf8887c8b446db8849f6ab45dc62b, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/B/bb8b301450524e78b08443b4754d7af7, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/B/56fc6852722b4910a6fbf0f63074dc24, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/B/31ace1d3b5ea4aff9dfe211e2381241d, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/B/d4304f3498d1423b86f72f531ba10732, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/B/72b4ac89ccac4ebbb1aad33e7f40633a, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/B/dfbf3f8699174400847fac13fe98e565, 
hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/B/0b507f64d9ba414ebd2b919b21523c83, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/B/922eaca726f04d9b84ebf6951d20449a, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/B/1c1b4b6b840a4465b1787e116cd9506e, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/B/734e7b4e91ee49c086eda0ceed331315, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/B/f1b076aee7da443ab4929b1739206f3e, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/B/7bcac5c2b2d84a4e8b9914ff4cae4f62, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/B/a19a648b4859434aaea8bcc4f1ea6aca, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/B/cbcba83f2f5a4a2c9a45e199123883d9, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/B/be52b0fe066245a5847cf6d7a28ea50c, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/B/de315e42a0ea48e68e3000514aa0272d, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/B/0993cc3639e445568f757bc8445943c7, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/B/2d3e287a32214e4b9bbcee6c9b57ec34, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/B/87059ff54398433399e890145257d2c3, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/B/69ab217805b44608970995114ee1ca71, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/B/a324b33c10b94294b537a36e60e83051, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/B/4c49c5a4b4c44fb9a419bb6e5eea4961, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/B/68a677eddeaa48d3887fc8db7a7d4307, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/B/15dbad73109b4c53b5050aaf794d630d] to archive 2024-11-23T15:26:42,918 DEBUG [StoreCloser-TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-23T15:26:42,919 DEBUG [StoreCloser-TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/B/64f1198f2dbd4d64a111e6976be76e71 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/B/64f1198f2dbd4d64a111e6976be76e71 2024-11-23T15:26:42,920 DEBUG [StoreCloser-TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/B/f4acc1b2c855428496a4719f2e813f24 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/B/f4acc1b2c855428496a4719f2e813f24 2024-11-23T15:26:42,921 DEBUG [StoreCloser-TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/B/dcf381a2d27949cfaad48bdb44ec46de to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/B/dcf381a2d27949cfaad48bdb44ec46de 2024-11-23T15:26:42,922 DEBUG [StoreCloser-TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/B/1d5bf8887c8b446db8849f6ab45dc62b to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/B/1d5bf8887c8b446db8849f6ab45dc62b 2024-11-23T15:26:42,922 DEBUG [StoreCloser-TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/B/bb8b301450524e78b08443b4754d7af7 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/B/bb8b301450524e78b08443b4754d7af7 2024-11-23T15:26:42,923 DEBUG [StoreCloser-TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/B/56fc6852722b4910a6fbf0f63074dc24 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/B/56fc6852722b4910a6fbf0f63074dc24 2024-11-23T15:26:42,924 DEBUG [StoreCloser-TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/B/31ace1d3b5ea4aff9dfe211e2381241d to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/B/31ace1d3b5ea4aff9dfe211e2381241d 2024-11-23T15:26:42,925 DEBUG [StoreCloser-TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/B/d4304f3498d1423b86f72f531ba10732 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/B/d4304f3498d1423b86f72f531ba10732 2024-11-23T15:26:42,926 DEBUG [StoreCloser-TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/B/72b4ac89ccac4ebbb1aad33e7f40633a to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/B/72b4ac89ccac4ebbb1aad33e7f40633a 2024-11-23T15:26:42,926 DEBUG [StoreCloser-TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/B/dfbf3f8699174400847fac13fe98e565 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/B/dfbf3f8699174400847fac13fe98e565 2024-11-23T15:26:42,927 DEBUG [StoreCloser-TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/B/0b507f64d9ba414ebd2b919b21523c83 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/B/0b507f64d9ba414ebd2b919b21523c83 2024-11-23T15:26:42,928 DEBUG [StoreCloser-TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/B/922eaca726f04d9b84ebf6951d20449a to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/B/922eaca726f04d9b84ebf6951d20449a 2024-11-23T15:26:42,929 DEBUG [StoreCloser-TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/B/1c1b4b6b840a4465b1787e116cd9506e to 
hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/B/1c1b4b6b840a4465b1787e116cd9506e 2024-11-23T15:26:42,929 DEBUG [StoreCloser-TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/B/734e7b4e91ee49c086eda0ceed331315 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/B/734e7b4e91ee49c086eda0ceed331315 2024-11-23T15:26:42,930 DEBUG [StoreCloser-TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/B/f1b076aee7da443ab4929b1739206f3e to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/B/f1b076aee7da443ab4929b1739206f3e 2024-11-23T15:26:42,931 DEBUG [StoreCloser-TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/B/7bcac5c2b2d84a4e8b9914ff4cae4f62 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/B/7bcac5c2b2d84a4e8b9914ff4cae4f62 2024-11-23T15:26:42,932 DEBUG [StoreCloser-TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/B/a19a648b4859434aaea8bcc4f1ea6aca to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/B/a19a648b4859434aaea8bcc4f1ea6aca 2024-11-23T15:26:42,933 DEBUG [StoreCloser-TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/B/cbcba83f2f5a4a2c9a45e199123883d9 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/B/cbcba83f2f5a4a2c9a45e199123883d9 2024-11-23T15:26:42,933 DEBUG [StoreCloser-TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/B/be52b0fe066245a5847cf6d7a28ea50c to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/B/be52b0fe066245a5847cf6d7a28ea50c 2024-11-23T15:26:42,934 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/B/de315e42a0ea48e68e3000514aa0272d to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/B/de315e42a0ea48e68e3000514aa0272d 2024-11-23T15:26:42,935 DEBUG [StoreCloser-TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/B/0993cc3639e445568f757bc8445943c7 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/B/0993cc3639e445568f757bc8445943c7 2024-11-23T15:26:42,936 DEBUG [StoreCloser-TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/B/2d3e287a32214e4b9bbcee6c9b57ec34 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/B/2d3e287a32214e4b9bbcee6c9b57ec34 2024-11-23T15:26:42,937 DEBUG [StoreCloser-TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/B/87059ff54398433399e890145257d2c3 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/B/87059ff54398433399e890145257d2c3 2024-11-23T15:26:42,937 DEBUG [StoreCloser-TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/B/69ab217805b44608970995114ee1ca71 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/B/69ab217805b44608970995114ee1ca71 2024-11-23T15:26:42,938 DEBUG [StoreCloser-TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/B/a324b33c10b94294b537a36e60e83051 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/B/a324b33c10b94294b537a36e60e83051 2024-11-23T15:26:42,939 DEBUG [StoreCloser-TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/B/4c49c5a4b4c44fb9a419bb6e5eea4961 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/B/4c49c5a4b4c44fb9a419bb6e5eea4961 2024-11-23T15:26:42,939 DEBUG [StoreCloser-TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/B/68a677eddeaa48d3887fc8db7a7d4307 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/B/68a677eddeaa48d3887fc8db7a7d4307 2024-11-23T15:26:42,940 DEBUG [StoreCloser-TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/B/15dbad73109b4c53b5050aaf794d630d to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/B/15dbad73109b4c53b5050aaf794d630d 2024-11-23T15:26:42,941 DEBUG [StoreCloser-TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/C/c69ea70c178f47a982a0147d5fdc266c, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/C/a4b0a5bafcae4501a6d4bb043a2e18d8, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/C/29f11b0d28fc49dbb513a6e5e63128a1, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/C/c773eaba62f441c6982fe9144c18faa0, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/C/e4821e92534a43799a41acca3a77ca73, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/C/758f2179b2714d9fa9c1f5638b1d0694, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/C/28445b216c984108a1930bf1aa8be421, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/C/e0e9c4feae2046b987c5e591386af730, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/C/47a5a32e7e27464fb0d2b94c97cee65f, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/C/316f575f1c254bde8de6f3ba2dc36b68, 
hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/C/e0607311fcc54253ad1badf3f9da078e, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/C/cfce668c75454353a4a9e0a50de7bf9d, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/C/3ae4b6f9760843c9842be6633db5a85c, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/C/e42812cf123549eba58dcd776573c3c9, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/C/49a3c2cf0c534f60a64f3f42dfde5040, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/C/1fa23610cf3947acbd56093691c07dbc, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/C/b279310df7944afd918b4078b9803f54, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/C/d599dfbc15f140db87790d69190c8c3e, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/C/8dcd148d673647d49875d7a48a3db71a, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/C/9db6d522012b4dff8b3b518789d27daf, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/C/58be757d453e457bb6edc7c7eb3776d7, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/C/3d0137ddd9eb4f34944b1e845a303c68, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/C/b9ba42e863864fc7903d17b507533fd4, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/C/20081849d650417592144f32d263d9d3, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/C/c5203f83e1ec4bb7b0c36bb05a6ba1be, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/C/3c4d2e7c562a41a59a2ee9346e396d50, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/C/86745284a80744fba80610d46556f6bd, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/C/b6eb419447d0450d8caea0b6f8c281c6] to archive 2024-11-23T15:26:42,942 DEBUG [StoreCloser-TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-23T15:26:42,943 DEBUG [StoreCloser-TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/C/c69ea70c178f47a982a0147d5fdc266c to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/C/c69ea70c178f47a982a0147d5fdc266c 2024-11-23T15:26:42,943 DEBUG [StoreCloser-TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/C/a4b0a5bafcae4501a6d4bb043a2e18d8 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/C/a4b0a5bafcae4501a6d4bb043a2e18d8 2024-11-23T15:26:42,944 DEBUG [StoreCloser-TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/C/29f11b0d28fc49dbb513a6e5e63128a1 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/C/29f11b0d28fc49dbb513a6e5e63128a1 2024-11-23T15:26:42,945 DEBUG [StoreCloser-TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/C/c773eaba62f441c6982fe9144c18faa0 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/C/c773eaba62f441c6982fe9144c18faa0 2024-11-23T15:26:42,945 DEBUG [StoreCloser-TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/C/e4821e92534a43799a41acca3a77ca73 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/C/e4821e92534a43799a41acca3a77ca73 2024-11-23T15:26:42,946 DEBUG [StoreCloser-TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/C/758f2179b2714d9fa9c1f5638b1d0694 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/C/758f2179b2714d9fa9c1f5638b1d0694 2024-11-23T15:26:42,947 DEBUG [StoreCloser-TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/C/28445b216c984108a1930bf1aa8be421 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/C/28445b216c984108a1930bf1aa8be421 2024-11-23T15:26:42,947 DEBUG [StoreCloser-TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/C/e0e9c4feae2046b987c5e591386af730 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/C/e0e9c4feae2046b987c5e591386af730 2024-11-23T15:26:42,948 DEBUG [StoreCloser-TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/C/47a5a32e7e27464fb0d2b94c97cee65f to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/C/47a5a32e7e27464fb0d2b94c97cee65f 2024-11-23T15:26:42,949 DEBUG [StoreCloser-TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/C/316f575f1c254bde8de6f3ba2dc36b68 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/C/316f575f1c254bde8de6f3ba2dc36b68 2024-11-23T15:26:42,950 DEBUG [StoreCloser-TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/C/e0607311fcc54253ad1badf3f9da078e to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/C/e0607311fcc54253ad1badf3f9da078e 2024-11-23T15:26:42,950 DEBUG [StoreCloser-TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/C/cfce668c75454353a4a9e0a50de7bf9d to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/C/cfce668c75454353a4a9e0a50de7bf9d 2024-11-23T15:26:42,951 DEBUG [StoreCloser-TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/C/3ae4b6f9760843c9842be6633db5a85c to 
hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/C/3ae4b6f9760843c9842be6633db5a85c 2024-11-23T15:26:42,952 DEBUG [StoreCloser-TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/C/e42812cf123549eba58dcd776573c3c9 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/C/e42812cf123549eba58dcd776573c3c9 2024-11-23T15:26:42,953 DEBUG [StoreCloser-TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/C/49a3c2cf0c534f60a64f3f42dfde5040 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/C/49a3c2cf0c534f60a64f3f42dfde5040 2024-11-23T15:26:42,953 DEBUG [StoreCloser-TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/C/1fa23610cf3947acbd56093691c07dbc to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/C/1fa23610cf3947acbd56093691c07dbc 2024-11-23T15:26:42,954 DEBUG [StoreCloser-TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/C/b279310df7944afd918b4078b9803f54 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/C/b279310df7944afd918b4078b9803f54 2024-11-23T15:26:42,955 DEBUG [StoreCloser-TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/C/d599dfbc15f140db87790d69190c8c3e to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/C/d599dfbc15f140db87790d69190c8c3e 2024-11-23T15:26:42,956 DEBUG [StoreCloser-TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/C/8dcd148d673647d49875d7a48a3db71a to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/C/8dcd148d673647d49875d7a48a3db71a 2024-11-23T15:26:42,956 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/C/9db6d522012b4dff8b3b518789d27daf to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/C/9db6d522012b4dff8b3b518789d27daf 2024-11-23T15:26:42,957 DEBUG [StoreCloser-TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/C/58be757d453e457bb6edc7c7eb3776d7 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/C/58be757d453e457bb6edc7c7eb3776d7 2024-11-23T15:26:42,958 DEBUG [StoreCloser-TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/C/3d0137ddd9eb4f34944b1e845a303c68 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/C/3d0137ddd9eb4f34944b1e845a303c68 2024-11-23T15:26:42,959 DEBUG [StoreCloser-TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/C/b9ba42e863864fc7903d17b507533fd4 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/C/b9ba42e863864fc7903d17b507533fd4 2024-11-23T15:26:42,959 DEBUG [StoreCloser-TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/C/20081849d650417592144f32d263d9d3 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/C/20081849d650417592144f32d263d9d3 2024-11-23T15:26:42,960 DEBUG [StoreCloser-TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/C/c5203f83e1ec4bb7b0c36bb05a6ba1be to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/C/c5203f83e1ec4bb7b0c36bb05a6ba1be 2024-11-23T15:26:42,961 DEBUG [StoreCloser-TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/C/3c4d2e7c562a41a59a2ee9346e396d50 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/C/3c4d2e7c562a41a59a2ee9346e396d50 2024-11-23T15:26:42,961 DEBUG [StoreCloser-TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/C/86745284a80744fba80610d46556f6bd to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/C/86745284a80744fba80610d46556f6bd 2024-11-23T15:26:42,962 DEBUG [StoreCloser-TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/C/b6eb419447d0450d8caea0b6f8c281c6 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/C/b6eb419447d0450d8caea0b6f8c281c6 2024-11-23T15:26:42,966 DEBUG [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/recovered.edits/395.seqid, newMaxSeqId=395, maxSeqId=1 2024-11-23T15:26:42,966 INFO [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9. 
2024-11-23T15:26:42,966 DEBUG [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] regionserver.HRegion(1635): Region close journal for 858dfe5d7eeba9b673a3436fe5ded0f9: 2024-11-23T15:26:42,967 INFO [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] handler.UnassignRegionHandler(170): Closed 858dfe5d7eeba9b673a3436fe5ded0f9 2024-11-23T15:26:42,968 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=146 updating hbase:meta row=858dfe5d7eeba9b673a3436fe5ded0f9, regionState=CLOSED 2024-11-23T15:26:42,969 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=147, resume processing ppid=146 2024-11-23T15:26:42,969 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=147, ppid=146, state=SUCCESS; CloseRegionProcedure 858dfe5d7eeba9b673a3436fe5ded0f9, server=6a36843bf905,33811,1732375456985 in 1.7900 sec 2024-11-23T15:26:42,970 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=146, resume processing ppid=145 2024-11-23T15:26:42,970 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=146, ppid=145, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=858dfe5d7eeba9b673a3436fe5ded0f9, UNASSIGN in 1.7930 sec 2024-11-23T15:26:42,971 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=145, resume processing ppid=144 2024-11-23T15:26:42,972 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=145, ppid=144, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.7960 sec 2024-11-23T15:26:42,972 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732375602972"}]},"ts":"1732375602972"} 2024-11-23T15:26:42,973 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-11-23T15:26:42,975 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-11-23T15:26:42,976 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=144, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.8090 sec 2024-11-23T15:26:43,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-11-23T15:26:43,272 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 144 completed 2024-11-23T15:26:43,272 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-11-23T15:26:43,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] procedure2.ProcedureExecutor(1098): Stored pid=148, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-11-23T15:26:43,273 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=148, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-23T15:26:43,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=148 2024-11-23T15:26:43,274 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(115): Deleting regions from 
filesystem for pid=148, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-23T15:26:43,276 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9 2024-11-23T15:26:43,278 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/A, FileablePath, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/B, FileablePath, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/C, FileablePath, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/recovered.edits] 2024-11-23T15:26:43,280 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/A/a8f2d83df455483a9f5e191acc31ff26 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/A/a8f2d83df455483a9f5e191acc31ff26 2024-11-23T15:26:43,280 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/A/df669a23c0514d29aa9dcfd0784780ac to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/A/df669a23c0514d29aa9dcfd0784780ac 2024-11-23T15:26:43,282 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/B/2e81f42d44564f1b80d750334878b795 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/B/2e81f42d44564f1b80d750334878b795 2024-11-23T15:26:43,283 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/B/505fd55701194678a8e3eb6b09b54d7e to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/B/505fd55701194678a8e3eb6b09b54d7e 2024-11-23T15:26:43,284 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/C/0c2f4c65c36d4c168ec277097b734c54 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/C/0c2f4c65c36d4c168ec277097b734c54 
2024-11-23T15:26:43,285 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/C/704af4463d8940138a73466f07728e64 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/C/704af4463d8940138a73466f07728e64 2024-11-23T15:26:43,287 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/recovered.edits/395.seqid to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9/recovered.edits/395.seqid 2024-11-23T15:26:43,287 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/858dfe5d7eeba9b673a3436fe5ded0f9 2024-11-23T15:26:43,287 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-11-23T15:26:43,289 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=148, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-23T15:26:43,290 WARN [PEWorker-3 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-11-23T15:26:43,291 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 2024-11-23T15:26:43,292 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=148, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-23T15:26:43,292 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 2024-11-23T15:26:43,292 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732375603292"}]},"ts":"9223372036854775807"} 2024-11-23T15:26:43,294 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-11-23T15:26:43,294 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 858dfe5d7eeba9b673a3436fe5ded0f9, NAME => 'TestAcidGuarantees,,1732375577314.858dfe5d7eeba9b673a3436fe5ded0f9.', STARTKEY => '', ENDKEY => ''}] 2024-11-23T15:26:43,294 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 
2024-11-23T15:26:43,294 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732375603294"}]},"ts":"9223372036854775807"} 2024-11-23T15:26:43,295 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-11-23T15:26:43,297 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(133): Finished pid=148, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-23T15:26:43,297 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=148, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 25 msec 2024-11-23T15:26:43,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=148 2024-11-23T15:26:43,374 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 148 completed 2024-11-23T15:26:43,384 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testScanAtomicity Thread=239 (was 240), OpenFileDescriptor=451 (was 461), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=277 (was 283), ProcessCount=11 (was 11), AvailableMemoryMB=3675 (was 3688) 2024-11-23T15:26:43,392 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testMobGetAtomicity Thread=239, OpenFileDescriptor=451, MaxFileDescriptor=1048576, SystemLoadAverage=277, ProcessCount=11, AvailableMemoryMB=3675 2024-11-23T15:26:43,393 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
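[Editor's note, not part of the captured log] The DISABLE (procId 144) and DELETE (procId 148) operations that complete above are the kind of calls the test harness issues through the HBase 2.x Admin client; DisableTableProcedure and DeleteTableProcedure on the master are what the client blocks on. The following is a minimal, hedged sketch of that client side: the table name and the disable-before-delete ordering come from the log, while the class name, main method and connection setup are illustrative assumptions only.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DropTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();            // reads hbase-site.xml / ZK quorum
    TableName table = TableName.valueOf("TestAcidGuarantees");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Both calls block until the corresponding master procedure
      // (DisableTableProcedure / DeleteTableProcedure in the log) finishes.
      if (admin.isTableEnabled(table)) {
        admin.disableTable(table);
      }
      admin.deleteTable(table);
    }
  }
}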
2024-11-23T15:26:43,393 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-23T15:26:43,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] procedure2.ProcedureExecutor(1098): Stored pid=149, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-11-23T15:26:43,395 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=149, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-11-23T15:26:43,395 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:43,395 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 149 2024-11-23T15:26:43,395 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=149, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-23T15:26:43,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=149 2024-11-23T15:26:43,400 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742308_1484 (size=963) 2024-11-23T15:26:43,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=149 2024-11-23T15:26:43,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=149 2024-11-23T15:26:43,802 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => d02fcaab8ded3270abaa685bc6ecbc1d, NAME => 'TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => 
'1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704 2024-11-23T15:26:43,806 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742309_1485 (size=53) 2024-11-23T15:26:43,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=149 2024-11-23T15:26:44,207 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T15:26:44,207 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing d02fcaab8ded3270abaa685bc6ecbc1d, disabling compactions & flushes 2024-11-23T15:26:44,207 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d. 2024-11-23T15:26:44,207 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d. 2024-11-23T15:26:44,207 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d. after waiting 0 ms 2024-11-23T15:26:44,207 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d. 2024-11-23T15:26:44,207 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d. 
2024-11-23T15:26:44,207 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for d02fcaab8ded3270abaa685bc6ecbc1d: 2024-11-23T15:26:44,208 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=149, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-11-23T15:26:44,208 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1732375604208"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732375604208"}]},"ts":"1732375604208"} 2024-11-23T15:26:44,209 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-11-23T15:26:44,209 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=149, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-23T15:26:44,210 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732375604209"}]},"ts":"1732375604209"} 2024-11-23T15:26:44,210 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-11-23T15:26:44,213 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=150, ppid=149, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=d02fcaab8ded3270abaa685bc6ecbc1d, ASSIGN}] 2024-11-23T15:26:44,214 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=150, ppid=149, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=d02fcaab8ded3270abaa685bc6ecbc1d, ASSIGN 2024-11-23T15:26:44,214 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(264): Starting pid=150, ppid=149, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=d02fcaab8ded3270abaa685bc6ecbc1d, ASSIGN; state=OFFLINE, location=6a36843bf905,33811,1732375456985; forceNewPlan=false, retain=false 2024-11-23T15:26:44,365 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=150 updating hbase:meta row=d02fcaab8ded3270abaa685bc6ecbc1d, regionState=OPENING, regionLocation=6a36843bf905,33811,1732375456985 2024-11-23T15:26:44,366 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=151, ppid=150, state=RUNNABLE; OpenRegionProcedure d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985}] 2024-11-23T15:26:44,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=149 2024-11-23T15:26:44,517 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:26:44,520 INFO [RS_OPEN_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d. 
2024-11-23T15:26:44,520 DEBUG [RS_OPEN_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegion(7285): Opening region: {ENCODED => d02fcaab8ded3270abaa685bc6ecbc1d, NAME => 'TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.', STARTKEY => '', ENDKEY => ''} 2024-11-23T15:26:44,520 DEBUG [RS_OPEN_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees d02fcaab8ded3270abaa685bc6ecbc1d 2024-11-23T15:26:44,520 DEBUG [RS_OPEN_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T15:26:44,521 DEBUG [RS_OPEN_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegion(7327): checking encryption for d02fcaab8ded3270abaa685bc6ecbc1d 2024-11-23T15:26:44,521 DEBUG [RS_OPEN_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegion(7330): checking classloading for d02fcaab8ded3270abaa685bc6ecbc1d 2024-11-23T15:26:44,522 INFO [StoreOpener-d02fcaab8ded3270abaa685bc6ecbc1d-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region d02fcaab8ded3270abaa685bc6ecbc1d 2024-11-23T15:26:44,523 INFO [StoreOpener-d02fcaab8ded3270abaa685bc6ecbc1d-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-23T15:26:44,523 INFO [StoreOpener-d02fcaab8ded3270abaa685bc6ecbc1d-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d02fcaab8ded3270abaa685bc6ecbc1d columnFamilyName A 2024-11-23T15:26:44,523 DEBUG [StoreOpener-d02fcaab8ded3270abaa685bc6ecbc1d-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:44,523 INFO [StoreOpener-d02fcaab8ded3270abaa685bc6ecbc1d-1 {}] regionserver.HStore(327): Store=d02fcaab8ded3270abaa685bc6ecbc1d/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T15:26:44,523 INFO [StoreOpener-d02fcaab8ded3270abaa685bc6ecbc1d-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region d02fcaab8ded3270abaa685bc6ecbc1d 2024-11-23T15:26:44,524 INFO [StoreOpener-d02fcaab8ded3270abaa685bc6ecbc1d-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-23T15:26:44,524 INFO [StoreOpener-d02fcaab8ded3270abaa685bc6ecbc1d-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d02fcaab8ded3270abaa685bc6ecbc1d columnFamilyName B 2024-11-23T15:26:44,524 DEBUG [StoreOpener-d02fcaab8ded3270abaa685bc6ecbc1d-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:44,524 INFO [StoreOpener-d02fcaab8ded3270abaa685bc6ecbc1d-1 {}] regionserver.HStore(327): Store=d02fcaab8ded3270abaa685bc6ecbc1d/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T15:26:44,525 INFO [StoreOpener-d02fcaab8ded3270abaa685bc6ecbc1d-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region d02fcaab8ded3270abaa685bc6ecbc1d 2024-11-23T15:26:44,525 INFO [StoreOpener-d02fcaab8ded3270abaa685bc6ecbc1d-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-23T15:26:44,525 INFO [StoreOpener-d02fcaab8ded3270abaa685bc6ecbc1d-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d02fcaab8ded3270abaa685bc6ecbc1d columnFamilyName C 2024-11-23T15:26:44,525 DEBUG [StoreOpener-d02fcaab8ded3270abaa685bc6ecbc1d-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:44,526 INFO [StoreOpener-d02fcaab8ded3270abaa685bc6ecbc1d-1 {}] regionserver.HStore(327): Store=d02fcaab8ded3270abaa685bc6ecbc1d/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T15:26:44,526 INFO [RS_OPEN_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d. 2024-11-23T15:26:44,526 DEBUG [RS_OPEN_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d 2024-11-23T15:26:44,526 DEBUG [RS_OPEN_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d 2024-11-23T15:26:44,527 DEBUG [RS_OPEN_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-23T15:26:44,528 DEBUG [RS_OPEN_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegion(1085): writing seq id for d02fcaab8ded3270abaa685bc6ecbc1d 2024-11-23T15:26:44,530 DEBUG [RS_OPEN_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-23T15:26:44,530 INFO [RS_OPEN_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegion(1102): Opened d02fcaab8ded3270abaa685bc6ecbc1d; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=71688659, jitterRate=0.06824426352977753}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-23T15:26:44,531 DEBUG [RS_OPEN_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegion(1001): Region open journal for d02fcaab8ded3270abaa685bc6ecbc1d: 2024-11-23T15:26:44,531 INFO [RS_OPEN_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d., pid=151, masterSystemTime=1732375604517 2024-11-23T15:26:44,532 DEBUG [RS_OPEN_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d. 2024-11-23T15:26:44,532 INFO [RS_OPEN_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d. 
2024-11-23T15:26:44,533 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=150 updating hbase:meta row=d02fcaab8ded3270abaa685bc6ecbc1d, regionState=OPEN, openSeqNum=2, regionLocation=6a36843bf905,33811,1732375456985 2024-11-23T15:26:44,534 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=151, resume processing ppid=150 2024-11-23T15:26:44,535 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=151, ppid=150, state=SUCCESS; OpenRegionProcedure d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 in 167 msec 2024-11-23T15:26:44,536 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=150, resume processing ppid=149 2024-11-23T15:26:44,536 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=150, ppid=149, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=d02fcaab8ded3270abaa685bc6ecbc1d, ASSIGN in 322 msec 2024-11-23T15:26:44,536 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=149, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-23T15:26:44,536 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732375604536"}]},"ts":"1732375604536"} 2024-11-23T15:26:44,537 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-11-23T15:26:44,539 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=149, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-11-23T15:26:44,540 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=149, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.1460 sec 2024-11-23T15:26:45,402 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-11-23T15:26:45,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=149 2024-11-23T15:26:45,499 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 149 completed 2024-11-23T15:26:45,500 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5765d46a to 127.0.0.1:62881 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6d9954b7 2024-11-23T15:26:45,506 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3fb684eb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T15:26:45,507 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T15:26:45,508 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50964, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T15:26:45,509 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-23T15:26:45,510 INFO [RS-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35024, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-23T15:26:45,511 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
2024-11-23T15:26:45,511 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster$14(2798): Client=jenkins//172.17.0.2 modify table TestAcidGuarantees from 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} to 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '4', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-23T15:26:45,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] procedure2.ProcedureExecutor(1098): Stored pid=152, state=RUNNABLE:MODIFY_TABLE_PREPARE; ModifyTableProcedure table=TestAcidGuarantees 2024-11-23T15:26:45,519 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742310_1486 (size=999) 2024-11-23T15:26:45,921 DEBUG [PEWorker-2 {}] util.FSTableDescriptors(519): Deleted hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000001.963 2024-11-23T15:26:45,921 INFO [PEWorker-2 {}] util.FSTableDescriptors(297): Updated tableinfo=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000002.999 2024-11-23T15:26:45,923 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=153, ppid=152, state=RUNNABLE:REOPEN_TABLE_REGIONS_GET_REGIONS; ReopenTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-23T15:26:45,924 INFO 
[PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=154, ppid=153, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=d02fcaab8ded3270abaa685bc6ecbc1d, REOPEN/MOVE}] 2024-11-23T15:26:45,925 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=154, ppid=153, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=d02fcaab8ded3270abaa685bc6ecbc1d, REOPEN/MOVE 2024-11-23T15:26:45,925 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=154 updating hbase:meta row=d02fcaab8ded3270abaa685bc6ecbc1d, regionState=CLOSING, regionLocation=6a36843bf905,33811,1732375456985 2024-11-23T15:26:45,926 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-23T15:26:45,926 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=155, ppid=154, state=RUNNABLE; CloseRegionProcedure d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985}] 2024-11-23T15:26:46,077 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:26:46,078 INFO [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] handler.UnassignRegionHandler(124): Close d02fcaab8ded3270abaa685bc6ecbc1d 2024-11-23T15:26:46,078 DEBUG [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-23T15:26:46,078 DEBUG [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] regionserver.HRegion(1681): Closing d02fcaab8ded3270abaa685bc6ecbc1d, disabling compactions & flushes 2024-11-23T15:26:46,078 INFO [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d. 2024-11-23T15:26:46,078 DEBUG [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d. 2024-11-23T15:26:46,078 DEBUG [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d. after waiting 0 ms 2024-11-23T15:26:46,078 DEBUG [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d. 2024-11-23T15:26:46,081 DEBUG [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/recovered.edits/4.seqid, newMaxSeqId=4, maxSeqId=1 2024-11-23T15:26:46,082 INFO [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d. 
2024-11-23T15:26:46,082 DEBUG [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] regionserver.HRegion(1635): Region close journal for d02fcaab8ded3270abaa685bc6ecbc1d: 2024-11-23T15:26:46,082 WARN [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] regionserver.HRegionServer(3786): Not adding moved region record: d02fcaab8ded3270abaa685bc6ecbc1d to self. 2024-11-23T15:26:46,083 INFO [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] handler.UnassignRegionHandler(170): Closed d02fcaab8ded3270abaa685bc6ecbc1d 2024-11-23T15:26:46,083 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=154 updating hbase:meta row=d02fcaab8ded3270abaa685bc6ecbc1d, regionState=CLOSED 2024-11-23T15:26:46,085 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=155, resume processing ppid=154 2024-11-23T15:26:46,085 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=155, ppid=154, state=SUCCESS; CloseRegionProcedure d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 in 158 msec 2024-11-23T15:26:46,085 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=154, ppid=153, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=d02fcaab8ded3270abaa685bc6ecbc1d, REOPEN/MOVE; state=CLOSED, location=6a36843bf905,33811,1732375456985; forceNewPlan=false, retain=true 2024-11-23T15:26:46,236 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=154 updating hbase:meta row=d02fcaab8ded3270abaa685bc6ecbc1d, regionState=OPENING, regionLocation=6a36843bf905,33811,1732375456985 2024-11-23T15:26:46,237 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=156, ppid=154, state=RUNNABLE; OpenRegionProcedure d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985}] 2024-11-23T15:26:46,389 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:26:46,391 INFO [RS_OPEN_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_OPEN_REGION, pid=156}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d. 
2024-11-23T15:26:46,391 DEBUG [RS_OPEN_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_OPEN_REGION, pid=156}] regionserver.HRegion(7285): Opening region: {ENCODED => d02fcaab8ded3270abaa685bc6ecbc1d, NAME => 'TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.', STARTKEY => '', ENDKEY => ''} 2024-11-23T15:26:46,391 DEBUG [RS_OPEN_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_OPEN_REGION, pid=156}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees d02fcaab8ded3270abaa685bc6ecbc1d 2024-11-23T15:26:46,391 DEBUG [RS_OPEN_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_OPEN_REGION, pid=156}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T15:26:46,391 DEBUG [RS_OPEN_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_OPEN_REGION, pid=156}] regionserver.HRegion(7327): checking encryption for d02fcaab8ded3270abaa685bc6ecbc1d 2024-11-23T15:26:46,391 DEBUG [RS_OPEN_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_OPEN_REGION, pid=156}] regionserver.HRegion(7330): checking classloading for d02fcaab8ded3270abaa685bc6ecbc1d 2024-11-23T15:26:46,392 INFO [StoreOpener-d02fcaab8ded3270abaa685bc6ecbc1d-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region d02fcaab8ded3270abaa685bc6ecbc1d 2024-11-23T15:26:46,393 INFO [StoreOpener-d02fcaab8ded3270abaa685bc6ecbc1d-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-23T15:26:46,393 INFO [StoreOpener-d02fcaab8ded3270abaa685bc6ecbc1d-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d02fcaab8ded3270abaa685bc6ecbc1d columnFamilyName A 2024-11-23T15:26:46,394 DEBUG [StoreOpener-d02fcaab8ded3270abaa685bc6ecbc1d-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:46,394 INFO [StoreOpener-d02fcaab8ded3270abaa685bc6ecbc1d-1 {}] regionserver.HStore(327): Store=d02fcaab8ded3270abaa685bc6ecbc1d/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T15:26:46,395 INFO [StoreOpener-d02fcaab8ded3270abaa685bc6ecbc1d-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region d02fcaab8ded3270abaa685bc6ecbc1d 2024-11-23T15:26:46,395 INFO [StoreOpener-d02fcaab8ded3270abaa685bc6ecbc1d-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-23T15:26:46,396 INFO [StoreOpener-d02fcaab8ded3270abaa685bc6ecbc1d-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d02fcaab8ded3270abaa685bc6ecbc1d columnFamilyName B 2024-11-23T15:26:46,396 DEBUG [StoreOpener-d02fcaab8ded3270abaa685bc6ecbc1d-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:46,396 INFO [StoreOpener-d02fcaab8ded3270abaa685bc6ecbc1d-1 {}] regionserver.HStore(327): Store=d02fcaab8ded3270abaa685bc6ecbc1d/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T15:26:46,396 INFO [StoreOpener-d02fcaab8ded3270abaa685bc6ecbc1d-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region d02fcaab8ded3270abaa685bc6ecbc1d 2024-11-23T15:26:46,397 INFO [StoreOpener-d02fcaab8ded3270abaa685bc6ecbc1d-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-23T15:26:46,397 INFO [StoreOpener-d02fcaab8ded3270abaa685bc6ecbc1d-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d02fcaab8ded3270abaa685bc6ecbc1d columnFamilyName C 2024-11-23T15:26:46,397 DEBUG [StoreOpener-d02fcaab8ded3270abaa685bc6ecbc1d-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:46,397 INFO [StoreOpener-d02fcaab8ded3270abaa685bc6ecbc1d-1 {}] regionserver.HStore(327): Store=d02fcaab8ded3270abaa685bc6ecbc1d/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T15:26:46,397 INFO [RS_OPEN_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_OPEN_REGION, pid=156}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d. 2024-11-23T15:26:46,398 DEBUG [RS_OPEN_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_OPEN_REGION, pid=156}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d 2024-11-23T15:26:46,398 DEBUG [RS_OPEN_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_OPEN_REGION, pid=156}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d 2024-11-23T15:26:46,399 DEBUG [RS_OPEN_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_OPEN_REGION, pid=156}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-23T15:26:46,401 DEBUG [RS_OPEN_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_OPEN_REGION, pid=156}] regionserver.HRegion(1085): writing seq id for d02fcaab8ded3270abaa685bc6ecbc1d 2024-11-23T15:26:46,401 INFO [RS_OPEN_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_OPEN_REGION, pid=156}] regionserver.HRegion(1102): Opened d02fcaab8ded3270abaa685bc6ecbc1d; next sequenceid=5; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=60799559, jitterRate=-0.09401597082614899}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-23T15:26:46,402 DEBUG [RS_OPEN_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_OPEN_REGION, pid=156}] regionserver.HRegion(1001): Region open journal for d02fcaab8ded3270abaa685bc6ecbc1d: 2024-11-23T15:26:46,402 INFO [RS_OPEN_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_OPEN_REGION, pid=156}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d., pid=156, masterSystemTime=1732375606388 2024-11-23T15:26:46,403 DEBUG [RS_OPEN_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_OPEN_REGION, pid=156}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d. 2024-11-23T15:26:46,404 INFO [RS_OPEN_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_OPEN_REGION, pid=156}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d. 
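
The open entries above note that hbase.hregion.percolumnfamilyflush.size.lower.bound is not set in the TestAcidGuarantees descriptor, so the flush policy falls back to memstore flush size divided by the number of families (16.0 M), and the region comes up with FlushLargeStoresPolicy{flushSizeLowerBound=16777216}. A sketch of setting that bound explicitly in the table descriptor follows, assuming the standard HBase 2.x Admin/TableDescriptorBuilder API; the 8 MB value is illustrative, not taken from this test.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class FlushLowerBoundSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("TestAcidGuarantees");
      // Put the per-family flush lower bound into the table descriptor so the
      // flush policy no longer derives it from flush size / number of families.
      TableDescriptor modified = TableDescriptorBuilder
          .newBuilder(admin.getDescriptor(table))
          .setValue("hbase.hregion.percolumnfamilyflush.size.lower.bound",
                    String.valueOf(8L * 1024 * 1024))  // 8 MB, illustrative value
          .build();
      admin.modifyTable(modified);
    }
  }
}

Changing the descriptor this way would itself trigger another ReopenTableRegionsProcedure like the one above, so the new bound only takes effect once the regions have reopened.
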
2024-11-23T15:26:46,404 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=154 updating hbase:meta row=d02fcaab8ded3270abaa685bc6ecbc1d, regionState=OPEN, openSeqNum=5, regionLocation=6a36843bf905,33811,1732375456985 2024-11-23T15:26:46,405 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=156, resume processing ppid=154 2024-11-23T15:26:46,405 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=156, ppid=154, state=SUCCESS; OpenRegionProcedure d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 in 168 msec 2024-11-23T15:26:46,407 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=154, resume processing ppid=153 2024-11-23T15:26:46,407 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=154, ppid=153, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=d02fcaab8ded3270abaa685bc6ecbc1d, REOPEN/MOVE in 481 msec 2024-11-23T15:26:46,408 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=153, resume processing ppid=152 2024-11-23T15:26:46,408 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=153, ppid=152, state=SUCCESS; ReopenTableRegionsProcedure table=TestAcidGuarantees in 484 msec 2024-11-23T15:26:46,409 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=152, state=SUCCESS; ModifyTableProcedure table=TestAcidGuarantees in 896 msec 2024-11-23T15:26:46,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=152 2024-11-23T15:26:46,411 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2ac53e79 to 127.0.0.1:62881 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@d5efb7a 2024-11-23T15:26:46,414 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@644b7e6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T15:26:46,415 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x05bc9c3e to 127.0.0.1:62881 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7fc332d8 2024-11-23T15:26:46,418 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5c9b5141, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T15:26:46,419 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7181df3b to 127.0.0.1:62881 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@17327621 2024-11-23T15:26:46,421 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@11a52cdf, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T15:26:46,422 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x11030ef5 
to 127.0.0.1:62881 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1584f18a 2024-11-23T15:26:46,424 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2d7fe431, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T15:26:46,425 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x69abefea to 127.0.0.1:62881 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5b914bf4 2024-11-23T15:26:46,428 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@91d72db, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T15:26:46,429 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3f6a59e4 to 127.0.0.1:62881 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5d836f78 2024-11-23T15:26:46,433 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3d7fe93b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T15:26:46,433 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x150e08ed to 127.0.0.1:62881 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@53305d9b 2024-11-23T15:26:46,436 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@11c440f7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T15:26:46,436 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3a3b66d3 to 127.0.0.1:62881 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6bb6288a 2024-11-23T15:26:46,442 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@58460ef3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T15:26:46,442 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5cfdf76c to 127.0.0.1:62881 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6556601 2024-11-23T15:26:46,445 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6e8cd1ae, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T15:26:46,446 DEBUG [Time-limited test 
{}] zookeeper.ReadOnlyZKClient(149): Connect 0x68c2838a to 127.0.0.1:62881 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@458a85fd 2024-11-23T15:26:46,449 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4d832d43, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T15:26:46,451 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-23T15:26:46,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] procedure2.ProcedureExecutor(1098): Stored pid=157, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=157, table=TestAcidGuarantees 2024-11-23T15:26:46,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=157 2024-11-23T15:26:46,453 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=157, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=157, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-23T15:26:46,453 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=157, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=157, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-23T15:26:46,453 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=158, ppid=157, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-23T15:26:46,456 DEBUG [hconnection-0x32b26d8a-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T15:26:46,457 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50976, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T15:26:46,460 DEBUG [hconnection-0x79012215-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T15:26:46,460 DEBUG [hconnection-0x5214d3f2-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T15:26:46,460 DEBUG [hconnection-0x2cc1e6fd-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T15:26:46,461 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51004, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T15:26:46,461 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50990, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T15:26:46,461 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51018, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T15:26:46,464 DEBUG [hconnection-0x5a68d681-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using 
SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T15:26:46,464 DEBUG [hconnection-0x3728468b-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T15:26:46,465 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51034, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T15:26:46,465 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51048, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T15:26:46,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(8581): Flush requested on d02fcaab8ded3270abaa685bc6ecbc1d 2024-11-23T15:26:46,468 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d02fcaab8ded3270abaa685bc6ecbc1d 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-23T15:26:46,469 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d02fcaab8ded3270abaa685bc6ecbc1d, store=A 2024-11-23T15:26:46,469 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:26:46,469 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d02fcaab8ded3270abaa685bc6ecbc1d, store=B 2024-11-23T15:26:46,469 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:26:46,469 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d02fcaab8ded3270abaa685bc6ecbc1d, store=C 2024-11-23T15:26:46,469 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:26:46,472 DEBUG [hconnection-0x1e386e06-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T15:26:46,472 DEBUG [hconnection-0x663eb73a-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T15:26:46,473 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51058, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T15:26:46,473 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51060, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T15:26:46,478 DEBUG [hconnection-0xca5bfa8-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T15:26:46,478 DEBUG [hconnection-0x123a018b-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T15:26:46,478 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51068, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T15:26:46,479 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51072, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T15:26:46,488 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding 
memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:46,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51048 deadline: 1732375666486, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:46,488 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:46,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51018 deadline: 1732375666487, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:46,490 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:46,490 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:46,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 4 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51068 deadline: 1732375666488, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:46,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51034 deadline: 1732375666488, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:46,491 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:46,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51004 deadline: 1732375666488, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:46,511 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411233fb22b2564734b7c9ae402139e6af1e6_d02fcaab8ded3270abaa685bc6ecbc1d is 50, key is test_row_0/A:col10/1732375606464/Put/seqid=0 2024-11-23T15:26:46,515 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742311_1487 (size=12154) 2024-11-23T15:26:46,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=157 2024-11-23T15:26:46,590 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:46,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51048 deadline: 1732375666589, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:46,590 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:46,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51018 deadline: 1732375666589, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:46,593 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:46,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51068 deadline: 1732375666591, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:46,593 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:46,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51004 deadline: 1732375666591, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:46,598 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:46,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51034 deadline: 1732375666596, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:46,604 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:26:46,605 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=158 2024-11-23T15:26:46,605 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d. 2024-11-23T15:26:46,605 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d. as already flushing 2024-11-23T15:26:46,605 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d. 2024-11-23T15:26:46,605 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] handler.RSProcedureHandler(58): pid=158 java.io.IOException: Unable to complete flush {ENCODED => d02fcaab8ded3270abaa685bc6ecbc1d, NAME => 'TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:46,605 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=158 java.io.IOException: Unable to complete flush {ENCODED => d02fcaab8ded3270abaa685bc6ecbc1d, NAME => 'TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:46,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=158 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d02fcaab8ded3270abaa685bc6ecbc1d, NAME => 'TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d02fcaab8ded3270abaa685bc6ecbc1d, NAME => 'TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:46,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=157 2024-11-23T15:26:46,757 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:26:46,757 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=158 2024-11-23T15:26:46,757 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d. 2024-11-23T15:26:46,757 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d. as already flushing 2024-11-23T15:26:46,757 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d. 2024-11-23T15:26:46,758 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] handler.RSProcedureHandler(58): pid=158 java.io.IOException: Unable to complete flush {ENCODED => d02fcaab8ded3270abaa685bc6ecbc1d, NAME => 'TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:46,758 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=158 java.io.IOException: Unable to complete flush {ENCODED => d02fcaab8ded3270abaa685bc6ecbc1d, NAME => 'TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:46,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=158 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d02fcaab8ded3270abaa685bc6ecbc1d, NAME => 'TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d02fcaab8ded3270abaa685bc6ecbc1d, NAME => 'TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:46,792 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:46,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51018 deadline: 1732375666791, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:46,792 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:46,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51048 deadline: 1732375666792, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:46,795 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:46,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51004 deadline: 1732375666794, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:46,796 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:46,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51068 deadline: 1732375666795, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:46,800 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:46,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51034 deadline: 1732375666799, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:46,909 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:26:46,910 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=158 2024-11-23T15:26:46,910 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d. 2024-11-23T15:26:46,910 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d. as already flushing 2024-11-23T15:26:46,910 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d. 2024-11-23T15:26:46,910 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] handler.RSProcedureHandler(58): pid=158 java.io.IOException: Unable to complete flush {ENCODED => d02fcaab8ded3270abaa685bc6ecbc1d, NAME => 'TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:46,910 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=158 java.io.IOException: Unable to complete flush {ENCODED => d02fcaab8ded3270abaa685bc6ecbc1d, NAME => 'TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:46,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=158 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d02fcaab8ded3270abaa685bc6ecbc1d, NAME => 'TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d02fcaab8ded3270abaa685bc6ecbc1d, NAME => 'TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:46,916 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:46,919 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411233fb22b2564734b7c9ae402139e6af1e6_d02fcaab8ded3270abaa685bc6ecbc1d to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411233fb22b2564734b7c9ae402139e6af1e6_d02fcaab8ded3270abaa685bc6ecbc1d 2024-11-23T15:26:46,920 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/A/714f3c6e690349c6937d51468be1a5ac, store: [table=TestAcidGuarantees family=A region=d02fcaab8ded3270abaa685bc6ecbc1d] 2024-11-23T15:26:46,920 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/A/714f3c6e690349c6937d51468be1a5ac is 175, key is test_row_0/A:col10/1732375606464/Put/seqid=0 2024-11-23T15:26:46,924 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742312_1488 (size=30955) 2024-11-23T15:26:47,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=157 2024-11-23T15:26:47,062 DEBUG [RSProcedureDispatcher-pool-1 {}] 
master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:26:47,062 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=158 2024-11-23T15:26:47,062 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d. 2024-11-23T15:26:47,062 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d. as already flushing 2024-11-23T15:26:47,062 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d. 2024-11-23T15:26:47,063 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] handler.RSProcedureHandler(58): pid=158 java.io.IOException: Unable to complete flush {ENCODED => d02fcaab8ded3270abaa685bc6ecbc1d, NAME => 'TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:47,063 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=158 java.io.IOException: Unable to complete flush {ENCODED => d02fcaab8ded3270abaa685bc6ecbc1d, NAME => 'TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T15:26:47,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=158 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d02fcaab8ded3270abaa685bc6ecbc1d, NAME => 'TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d02fcaab8ded3270abaa685bc6ecbc1d, NAME => 'TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:47,095 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:47,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51018 deadline: 1732375667094, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:47,096 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:47,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51048 deadline: 1732375667095, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:47,097 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:47,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51004 deadline: 1732375667097, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:47,098 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:47,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51068 deadline: 1732375667097, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:47,103 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:47,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51034 deadline: 1732375667102, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:47,215 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:26:47,215 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=158 2024-11-23T15:26:47,215 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d. 2024-11-23T15:26:47,215 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d. as already flushing 2024-11-23T15:26:47,215 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d. 2024-11-23T15:26:47,215 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] handler.RSProcedureHandler(58): pid=158 java.io.IOException: Unable to complete flush {ENCODED => d02fcaab8ded3270abaa685bc6ecbc1d, NAME => 'TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:47,215 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=158 java.io.IOException: Unable to complete flush {ENCODED => d02fcaab8ded3270abaa685bc6ecbc1d, NAME => 'TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:47,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=158 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d02fcaab8ded3270abaa685bc6ecbc1d, NAME => 'TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d02fcaab8ded3270abaa685bc6ecbc1d, NAME => 'TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:47,324 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=17, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/A/714f3c6e690349c6937d51468be1a5ac 2024-11-23T15:26:47,347 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/B/5e9d3eb70e5e4f9c90492ba70cf1dd45 is 50, key is test_row_0/B:col10/1732375606464/Put/seqid=0 2024-11-23T15:26:47,351 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742313_1489 (size=12001) 2024-11-23T15:26:47,352 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/B/5e9d3eb70e5e4f9c90492ba70cf1dd45 2024-11-23T15:26:47,367 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:26:47,367 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=158 2024-11-23T15:26:47,367 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d. 
2024-11-23T15:26:47,368 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d. as already flushing 2024-11-23T15:26:47,368 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d. 2024-11-23T15:26:47,368 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] handler.RSProcedureHandler(58): pid=158 java.io.IOException: Unable to complete flush {ENCODED => d02fcaab8ded3270abaa685bc6ecbc1d, NAME => 'TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:47,368 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=158 java.io.IOException: Unable to complete flush {ENCODED => d02fcaab8ded3270abaa685bc6ecbc1d, NAME => 'TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T15:26:47,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=158 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d02fcaab8ded3270abaa685bc6ecbc1d, NAME => 'TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d02fcaab8ded3270abaa685bc6ecbc1d, NAME => 'TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T15:26:47,372 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/C/f7b81576582648a7b6dcfaf441d990b4 is 50, key is test_row_0/C:col10/1732375606464/Put/seqid=0 2024-11-23T15:26:47,375 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742314_1490 (size=12001) 2024-11-23T15:26:47,520 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:26:47,520 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=158 2024-11-23T15:26:47,521 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d. 2024-11-23T15:26:47,521 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d. as already flushing 2024-11-23T15:26:47,521 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d. 2024-11-23T15:26:47,521 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] handler.RSProcedureHandler(58): pid=158 java.io.IOException: Unable to complete flush {ENCODED => d02fcaab8ded3270abaa685bc6ecbc1d, NAME => 'TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:47,521 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=158 java.io.IOException: Unable to complete flush {ENCODED => d02fcaab8ded3270abaa685bc6ecbc1d, NAME => 'TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:47,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=158 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d02fcaab8ded3270abaa685bc6ecbc1d, NAME => 'TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d02fcaab8ded3270abaa685bc6ecbc1d, NAME => 'TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T15:26:47,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=157 2024-11-23T15:26:47,596 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:47,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51018 deadline: 1732375667596, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:47,599 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:47,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51048 deadline: 1732375667598, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:47,599 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:47,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51068 deadline: 1732375667599, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:47,603 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:47,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51004 deadline: 1732375667603, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:47,605 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:47,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51034 deadline: 1732375667605, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:47,673 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:26:47,673 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=158 2024-11-23T15:26:47,674 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d. 2024-11-23T15:26:47,674 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d. as already flushing 2024-11-23T15:26:47,674 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d. 2024-11-23T15:26:47,674 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] handler.RSProcedureHandler(58): pid=158 java.io.IOException: Unable to complete flush {ENCODED => d02fcaab8ded3270abaa685bc6ecbc1d, NAME => 'TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T15:26:47,674 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=158 java.io.IOException: Unable to complete flush {ENCODED => d02fcaab8ded3270abaa685bc6ecbc1d, NAME => 'TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:47,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=158 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d02fcaab8ded3270abaa685bc6ecbc1d, NAME => 'TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d02fcaab8ded3270abaa685bc6ecbc1d, NAME => 'TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:47,776 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/C/f7b81576582648a7b6dcfaf441d990b4 2024-11-23T15:26:47,780 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/A/714f3c6e690349c6937d51468be1a5ac as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/A/714f3c6e690349c6937d51468be1a5ac 2024-11-23T15:26:47,783 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/A/714f3c6e690349c6937d51468be1a5ac, entries=150, sequenceid=17, filesize=30.2 K 2024-11-23T15:26:47,783 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/B/5e9d3eb70e5e4f9c90492ba70cf1dd45 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/B/5e9d3eb70e5e4f9c90492ba70cf1dd45 2024-11-23T15:26:47,786 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/B/5e9d3eb70e5e4f9c90492ba70cf1dd45, entries=150, sequenceid=17, filesize=11.7 K 2024-11-23T15:26:47,787 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/C/f7b81576582648a7b6dcfaf441d990b4 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/C/f7b81576582648a7b6dcfaf441d990b4 2024-11-23T15:26:47,790 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/C/f7b81576582648a7b6dcfaf441d990b4, entries=150, sequenceid=17, filesize=11.7 K 2024-11-23T15:26:47,790 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=140.89 KB/144270 for d02fcaab8ded3270abaa685bc6ecbc1d in 1322ms, sequenceid=17, compaction requested=false 2024-11-23T15:26:47,790 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d02fcaab8ded3270abaa685bc6ecbc1d: 2024-11-23T15:26:47,826 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 
6a36843bf905,33811,1732375456985 2024-11-23T15:26:47,826 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=158 2024-11-23T15:26:47,826 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d. 2024-11-23T15:26:47,826 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegion(2837): Flushing d02fcaab8ded3270abaa685bc6ecbc1d 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-23T15:26:47,827 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d02fcaab8ded3270abaa685bc6ecbc1d, store=A 2024-11-23T15:26:47,827 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:26:47,827 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d02fcaab8ded3270abaa685bc6ecbc1d, store=B 2024-11-23T15:26:47,827 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:26:47,827 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d02fcaab8ded3270abaa685bc6ecbc1d, store=C 2024-11-23T15:26:47,827 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:26:47,832 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241123e4f3a6c1fc944cc8b6e03ae302fc3b81_d02fcaab8ded3270abaa685bc6ecbc1d is 50, key is test_row_0/A:col10/1732375606487/Put/seqid=0 2024-11-23T15:26:47,837 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742315_1491 (size=12154) 2024-11-23T15:26:47,837 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:47,840 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241123e4f3a6c1fc944cc8b6e03ae302fc3b81_d02fcaab8ded3270abaa685bc6ecbc1d to 
hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123e4f3a6c1fc944cc8b6e03ae302fc3b81_d02fcaab8ded3270abaa685bc6ecbc1d 2024-11-23T15:26:47,840 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/A/269f42b645204ab48878e0586d7b8874, store: [table=TestAcidGuarantees family=A region=d02fcaab8ded3270abaa685bc6ecbc1d] 2024-11-23T15:26:47,841 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/A/269f42b645204ab48878e0586d7b8874 is 175, key is test_row_0/A:col10/1732375606487/Put/seqid=0 2024-11-23T15:26:47,844 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742316_1492 (size=30955) 2024-11-23T15:26:48,245 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=41, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/A/269f42b645204ab48878e0586d7b8874 2024-11-23T15:26:48,251 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/B/f25061395c2d4398b67fa5f0a0a3d9c4 is 50, key is test_row_0/B:col10/1732375606487/Put/seqid=0 2024-11-23T15:26:48,255 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742317_1493 (size=12001) 2024-11-23T15:26:48,486 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-23T15:26:48,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=157 2024-11-23T15:26:48,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(8581): Flush requested on d02fcaab8ded3270abaa685bc6ecbc1d 2024-11-23T15:26:48,601 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d. as already flushing 2024-11-23T15:26:48,611 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:48,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51068 deadline: 1732375668608, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:48,611 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:48,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51048 deadline: 1732375668608, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:48,613 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:48,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51018 deadline: 1732375668610, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:48,613 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:48,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51004 deadline: 1732375668611, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:48,613 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:48,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51034 deadline: 1732375668611, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:48,656 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=41 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/B/f25061395c2d4398b67fa5f0a0a3d9c4 2024-11-23T15:26:48,663 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/C/dda747904cd54f38831710ed3f04a235 is 50, key is test_row_0/C:col10/1732375606487/Put/seqid=0 2024-11-23T15:26:48,669 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742318_1494 (size=12001) 2024-11-23T15:26:48,669 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=41 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/C/dda747904cd54f38831710ed3f04a235 2024-11-23T15:26:48,673 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/A/269f42b645204ab48878e0586d7b8874 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/A/269f42b645204ab48878e0586d7b8874 2024-11-23T15:26:48,677 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/A/269f42b645204ab48878e0586d7b8874, entries=150, sequenceid=41, filesize=30.2 K 2024-11-23T15:26:48,678 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/B/f25061395c2d4398b67fa5f0a0a3d9c4 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/B/f25061395c2d4398b67fa5f0a0a3d9c4 2024-11-23T15:26:48,681 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/B/f25061395c2d4398b67fa5f0a0a3d9c4, entries=150, sequenceid=41, filesize=11.7 K 2024-11-23T15:26:48,682 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/C/dda747904cd54f38831710ed3f04a235 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/C/dda747904cd54f38831710ed3f04a235 2024-11-23T15:26:48,685 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/C/dda747904cd54f38831710ed3f04a235, entries=150, sequenceid=41, filesize=11.7 K 2024-11-23T15:26:48,686 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for d02fcaab8ded3270abaa685bc6ecbc1d in 860ms, sequenceid=41, compaction requested=false 2024-11-23T15:26:48,686 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegion(2538): Flush status journal for d02fcaab8ded3270abaa685bc6ecbc1d: 2024-11-23T15:26:48,686 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d. 
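The span above shows the flush for this region completing: the A, B, and C store files are committed out of .tmp and the region reports "Finished flush ... sequenceid=41". For reference, a flush like this can be requested through the public HBase client API as sketched below; the table name and row/column values are taken from this log, while the retry cap and backoff delay are arbitrary placeholders.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class FlushAndRetryExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        TableName tn = TableName.valueOf("TestAcidGuarantees");   // table name from the log
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin();
             Table table = conn.getTable(tn)) {

            // Ask the master to flush the table; on the server this shows up as the
            // FlushTableProcedure / FlushRegionProcedure entries seen in the log.
            admin.flush(tn);

            // A put against the same row/family/qualifier pattern the test uses.
            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));

            int attempts = 0;
            while (true) {
                try {
                    table.put(put);
                    break;
                } catch (java.io.IOException e) {
                    // The server-side rejection seen above (RegionTooBusyException,
                    // "Over memstore limit") may surface here directly or wrapped
                    // after the client's own retries; back off and try again.
                    if (++attempts >= 10) {           // arbitrary cap, placeholder
                        throw e;
                    }
                    Thread.sleep(100L * attempts);    // linear backoff, placeholder values
                }
            }
        }
    }
}

Note that the standard client normally retries this kind of rejection on its own (bounded by hbase.client.retries.number and hbase.client.pause), so an explicit loop like this is mainly useful in tests that want tighter control over backoff.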
2024-11-23T15:26:48,686 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=158 2024-11-23T15:26:48,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4106): Remote procedure done, pid=158 2024-11-23T15:26:48,689 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=158, resume processing ppid=157 2024-11-23T15:26:48,689 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=158, ppid=157, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.2340 sec 2024-11-23T15:26:48,690 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=157, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=157, table=TestAcidGuarantees in 2.2380 sec 2024-11-23T15:26:48,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(8581): Flush requested on d02fcaab8ded3270abaa685bc6ecbc1d 2024-11-23T15:26:48,714 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d02fcaab8ded3270abaa685bc6ecbc1d 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-23T15:26:48,714 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d02fcaab8ded3270abaa685bc6ecbc1d, store=A 2024-11-23T15:26:48,714 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:26:48,714 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d02fcaab8ded3270abaa685bc6ecbc1d, store=B 2024-11-23T15:26:48,714 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:26:48,714 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d02fcaab8ded3270abaa685bc6ecbc1d, store=C 2024-11-23T15:26:48,714 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:26:48,724 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112354755841ccb6422893a6faa29cf7e30e_d02fcaab8ded3270abaa685bc6ecbc1d is 50, key is test_row_0/A:col10/1732375608712/Put/seqid=0 2024-11-23T15:26:48,733 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742319_1495 (size=12154) 2024-11-23T15:26:48,737 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:48,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51018 deadline: 1732375668734, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:48,737 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:48,737 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:48,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51034 deadline: 1732375668734, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:48,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51068 deadline: 1732375668735, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:48,738 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:48,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51004 deadline: 1732375668736, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:48,840 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:48,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51018 deadline: 1732375668838, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:48,840 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:48,840 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:48,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51068 deadline: 1732375668838, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:48,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51034 deadline: 1732375668838, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:48,841 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:48,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51004 deadline: 1732375668839, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:49,042 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:49,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51018 deadline: 1732375669041, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:49,043 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:49,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51068 deadline: 1732375669041, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:49,044 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:49,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51034 deadline: 1732375669042, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:49,044 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:49,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51004 deadline: 1732375669042, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:49,134 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:49,137 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112354755841ccb6422893a6faa29cf7e30e_d02fcaab8ded3270abaa685bc6ecbc1d to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112354755841ccb6422893a6faa29cf7e30e_d02fcaab8ded3270abaa685bc6ecbc1d 2024-11-23T15:26:49,138 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/A/b79cba593a8b4721a5b4004c2e3c1f21, store: [table=TestAcidGuarantees family=A region=d02fcaab8ded3270abaa685bc6ecbc1d] 2024-11-23T15:26:49,139 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/A/b79cba593a8b4721a5b4004c2e3c1f21 is 175, key is test_row_0/A:col10/1732375608712/Put/seqid=0 2024-11-23T15:26:49,142 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742320_1496 (size=30955) 2024-11-23T15:26:49,344 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too 
busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:49,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51018 deadline: 1732375669344, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:49,347 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:49,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51068 deadline: 1732375669345, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:49,348 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:49,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51034 deadline: 1732375669347, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:49,349 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:49,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51004 deadline: 1732375669347, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:49,543 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=54, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/A/b79cba593a8b4721a5b4004c2e3c1f21 2024-11-23T15:26:49,549 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/B/ecc739201ea04d1e9d3a3bac29bde2d1 is 50, key is test_row_0/B:col10/1732375608712/Put/seqid=0 2024-11-23T15:26:49,552 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742321_1497 (size=12001) 2024-11-23T15:26:49,553 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=54 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/B/ecc739201ea04d1e9d3a3bac29bde2d1 2024-11-23T15:26:49,560 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/C/d24a3ad18c0447bf8fef1d89a4d8bb9e is 50, key is test_row_0/C:col10/1732375608712/Put/seqid=0 2024-11-23T15:26:49,563 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742322_1498 (size=12001) 2024-11-23T15:26:49,850 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:49,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51018 deadline: 1732375669848, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:49,852 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:49,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51034 deadline: 1732375669850, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:49,852 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:49,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51068 deadline: 1732375669850, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:49,852 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:49,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51004 deadline: 1732375669851, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:49,964 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=54 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/C/d24a3ad18c0447bf8fef1d89a4d8bb9e 2024-11-23T15:26:49,968 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/A/b79cba593a8b4721a5b4004c2e3c1f21 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/A/b79cba593a8b4721a5b4004c2e3c1f21 2024-11-23T15:26:49,971 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/A/b79cba593a8b4721a5b4004c2e3c1f21, entries=150, sequenceid=54, filesize=30.2 K 2024-11-23T15:26:49,972 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/B/ecc739201ea04d1e9d3a3bac29bde2d1 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/B/ecc739201ea04d1e9d3a3bac29bde2d1 2024-11-23T15:26:49,975 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/B/ecc739201ea04d1e9d3a3bac29bde2d1, entries=150, sequenceid=54, filesize=11.7 K 2024-11-23T15:26:49,976 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/C/d24a3ad18c0447bf8fef1d89a4d8bb9e as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/C/d24a3ad18c0447bf8fef1d89a4d8bb9e 2024-11-23T15:26:49,978 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/C/d24a3ad18c0447bf8fef1d89a4d8bb9e, entries=150, sequenceid=54, filesize=11.7 K 2024-11-23T15:26:49,979 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for d02fcaab8ded3270abaa685bc6ecbc1d in 1265ms, sequenceid=54, compaction requested=true 2024-11-23T15:26:49,979 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d02fcaab8ded3270abaa685bc6ecbc1d: 2024-11-23T15:26:49,979 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d02fcaab8ded3270abaa685bc6ecbc1d:A, priority=-2147483648, current under compaction store size is 1 2024-11-23T15:26:49,979 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T15:26:49,979 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d02fcaab8ded3270abaa685bc6ecbc1d:B, priority=-2147483648, current under compaction store size is 2 2024-11-23T15:26:49,979 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T15:26:49,979 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d02fcaab8ded3270abaa685bc6ecbc1d:C, priority=-2147483648, current under compaction store size is 3 2024-11-23T15:26:49,979 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T15:26:49,979 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T15:26:49,979 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T15:26:49,980 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T15:26:49,980 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 92865 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T15:26:49,980 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1540): d02fcaab8ded3270abaa685bc6ecbc1d/B is initiating minor compaction (all files) 2024-11-23T15:26:49,980 DEBUG 
[RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HStore(1540): d02fcaab8ded3270abaa685bc6ecbc1d/A is initiating minor compaction (all files) 2024-11-23T15:26:49,980 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d02fcaab8ded3270abaa685bc6ecbc1d/B in TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d. 2024-11-23T15:26:49,980 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d02fcaab8ded3270abaa685bc6ecbc1d/A in TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d. 2024-11-23T15:26:49,980 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/B/5e9d3eb70e5e4f9c90492ba70cf1dd45, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/B/f25061395c2d4398b67fa5f0a0a3d9c4, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/B/ecc739201ea04d1e9d3a3bac29bde2d1] into tmpdir=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp, totalSize=35.2 K 2024-11-23T15:26:49,980 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/A/714f3c6e690349c6937d51468be1a5ac, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/A/269f42b645204ab48878e0586d7b8874, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/A/b79cba593a8b4721a5b4004c2e3c1f21] into tmpdir=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp, totalSize=90.7 K 2024-11-23T15:26:49,980 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d. 2024-11-23T15:26:49,980 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d. 
files: [hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/A/714f3c6e690349c6937d51468be1a5ac, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/A/269f42b645204ab48878e0586d7b8874, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/A/b79cba593a8b4721a5b4004c2e3c1f21] 2024-11-23T15:26:49,981 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting 5e9d3eb70e5e4f9c90492ba70cf1dd45, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=17, earliestPutTs=1732375606464 2024-11-23T15:26:49,981 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 714f3c6e690349c6937d51468be1a5ac, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=17, earliestPutTs=1732375606464 2024-11-23T15:26:49,981 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting f25061395c2d4398b67fa5f0a0a3d9c4, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1732375606482 2024-11-23T15:26:49,981 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 269f42b645204ab48878e0586d7b8874, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1732375606482 2024-11-23T15:26:49,981 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting ecc739201ea04d1e9d3a3bac29bde2d1, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1732375608606 2024-11-23T15:26:49,981 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.Compactor(224): Compacting b79cba593a8b4721a5b4004c2e3c1f21, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1732375608606 2024-11-23T15:26:49,986 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=d02fcaab8ded3270abaa685bc6ecbc1d] 2024-11-23T15:26:49,988 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d02fcaab8ded3270abaa685bc6ecbc1d#B#compaction#425 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-23T15:26:49,988 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/B/b83522acadd44acf82c8a28b8da64ff7 is 50, key is test_row_0/B:col10/1732375608712/Put/seqid=0 2024-11-23T15:26:49,989 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241123a57082b9033f433cb07c1d1e2eafbe02_d02fcaab8ded3270abaa685bc6ecbc1d store=[table=TestAcidGuarantees family=A region=d02fcaab8ded3270abaa685bc6ecbc1d] 2024-11-23T15:26:49,991 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241123a57082b9033f433cb07c1d1e2eafbe02_d02fcaab8ded3270abaa685bc6ecbc1d, store=[table=TestAcidGuarantees family=A region=d02fcaab8ded3270abaa685bc6ecbc1d] 2024-11-23T15:26:49,991 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241123a57082b9033f433cb07c1d1e2eafbe02_d02fcaab8ded3270abaa685bc6ecbc1d because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=d02fcaab8ded3270abaa685bc6ecbc1d] 2024-11-23T15:26:49,992 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742323_1499 (size=12104) 2024-11-23T15:26:49,994 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742324_1500 (size=4469) 2024-11-23T15:26:49,995 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d02fcaab8ded3270abaa685bc6ecbc1d#A#compaction#426 average throughput is 2.71 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T15:26:49,996 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/A/af06d13b37c14ae5baeed14bb2eb6fdf is 175, key is test_row_0/A:col10/1732375608712/Put/seqid=0 2024-11-23T15:26:50,001 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742325_1501 (size=31058) 2024-11-23T15:26:50,396 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/B/b83522acadd44acf82c8a28b8da64ff7 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/B/b83522acadd44acf82c8a28b8da64ff7 2024-11-23T15:26:50,401 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d02fcaab8ded3270abaa685bc6ecbc1d/B of d02fcaab8ded3270abaa685bc6ecbc1d into b83522acadd44acf82c8a28b8da64ff7(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T15:26:50,401 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d02fcaab8ded3270abaa685bc6ecbc1d: 2024-11-23T15:26:50,401 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d., storeName=d02fcaab8ded3270abaa685bc6ecbc1d/B, priority=13, startTime=1732375609979; duration=0sec 2024-11-23T15:26:50,401 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T15:26:50,401 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d02fcaab8ded3270abaa685bc6ecbc1d:B 2024-11-23T15:26:50,401 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T15:26:50,402 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T15:26:50,402 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1540): d02fcaab8ded3270abaa685bc6ecbc1d/C is initiating minor compaction (all files) 2024-11-23T15:26:50,402 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d02fcaab8ded3270abaa685bc6ecbc1d/C in TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d. 
2024-11-23T15:26:50,402 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/C/f7b81576582648a7b6dcfaf441d990b4, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/C/dda747904cd54f38831710ed3f04a235, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/C/d24a3ad18c0447bf8fef1d89a4d8bb9e] into tmpdir=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp, totalSize=35.2 K 2024-11-23T15:26:50,403 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting f7b81576582648a7b6dcfaf441d990b4, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=17, earliestPutTs=1732375606464 2024-11-23T15:26:50,403 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting dda747904cd54f38831710ed3f04a235, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1732375606482 2024-11-23T15:26:50,403 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting d24a3ad18c0447bf8fef1d89a4d8bb9e, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1732375608606 2024-11-23T15:26:50,404 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/A/af06d13b37c14ae5baeed14bb2eb6fdf as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/A/af06d13b37c14ae5baeed14bb2eb6fdf 2024-11-23T15:26:50,411 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d02fcaab8ded3270abaa685bc6ecbc1d/A of d02fcaab8ded3270abaa685bc6ecbc1d into af06d13b37c14ae5baeed14bb2eb6fdf(size=30.3 K), total size for store is 30.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T15:26:50,411 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d02fcaab8ded3270abaa685bc6ecbc1d: 2024-11-23T15:26:50,411 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d., storeName=d02fcaab8ded3270abaa685bc6ecbc1d/A, priority=13, startTime=1732375609979; duration=0sec 2024-11-23T15:26:50,412 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T15:26:50,412 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d02fcaab8ded3270abaa685bc6ecbc1d:A 2024-11-23T15:26:50,413 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d02fcaab8ded3270abaa685bc6ecbc1d#C#compaction#427 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T15:26:50,413 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/C/43f5fc425eb643b8bce2b829e1a8ecba is 50, key is test_row_0/C:col10/1732375608712/Put/seqid=0 2024-11-23T15:26:50,426 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742326_1502 (size=12104) 2024-11-23T15:26:50,431 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/C/43f5fc425eb643b8bce2b829e1a8ecba as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/C/43f5fc425eb643b8bce2b829e1a8ecba 2024-11-23T15:26:50,435 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d02fcaab8ded3270abaa685bc6ecbc1d/C of d02fcaab8ded3270abaa685bc6ecbc1d into 43f5fc425eb643b8bce2b829e1a8ecba(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T15:26:50,435 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d02fcaab8ded3270abaa685bc6ecbc1d: 2024-11-23T15:26:50,435 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d., storeName=d02fcaab8ded3270abaa685bc6ecbc1d/C, priority=13, startTime=1732375609979; duration=0sec 2024-11-23T15:26:50,435 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T15:26:50,435 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d02fcaab8ded3270abaa685bc6ecbc1d:C 2024-11-23T15:26:50,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=157 2024-11-23T15:26:50,557 INFO [Thread-2189 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 157 completed 2024-11-23T15:26:50,558 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-23T15:26:50,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] procedure2.ProcedureExecutor(1098): Stored pid=159, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=159, table=TestAcidGuarantees 2024-11-23T15:26:50,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=159 2024-11-23T15:26:50,560 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=159, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=159, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-23T15:26:50,560 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=159, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=159, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-23T15:26:50,561 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=160, ppid=159, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-23T15:26:50,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(8581): Flush requested on d02fcaab8ded3270abaa685bc6ecbc1d 2024-11-23T15:26:50,633 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d02fcaab8ded3270abaa685bc6ecbc1d 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-23T15:26:50,633 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d02fcaab8ded3270abaa685bc6ecbc1d, store=A 2024-11-23T15:26:50,633 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:26:50,633 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d02fcaab8ded3270abaa685bc6ecbc1d, store=B 2024-11-23T15:26:50,633 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:26:50,633 
DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d02fcaab8ded3270abaa685bc6ecbc1d, store=C 2024-11-23T15:26:50,633 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:26:50,639 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241123d91e2e99e04944fc87bf9f50454087f8_d02fcaab8ded3270abaa685bc6ecbc1d is 50, key is test_row_0/A:col10/1732375608727/Put/seqid=0 2024-11-23T15:26:50,643 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742327_1503 (size=12154) 2024-11-23T15:26:50,656 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:50,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51048 deadline: 1732375670654, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:50,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=159 2024-11-23T15:26:50,712 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:26:50,712 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=160 2024-11-23T15:26:50,712 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d. 
2024-11-23T15:26:50,713 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d. as already flushing 2024-11-23T15:26:50,713 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d. 2024-11-23T15:26:50,713 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] handler.RSProcedureHandler(58): pid=160 java.io.IOException: Unable to complete flush {ENCODED => d02fcaab8ded3270abaa685bc6ecbc1d, NAME => 'TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:50,713 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=160 java.io.IOException: Unable to complete flush {ENCODED => d02fcaab8ded3270abaa685bc6ecbc1d, NAME => 'TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T15:26:50,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=160 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d02fcaab8ded3270abaa685bc6ecbc1d, NAME => 'TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d02fcaab8ded3270abaa685bc6ecbc1d, NAME => 'TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:50,759 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:50,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51048 deadline: 1732375670757, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:50,852 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:50,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51018 deadline: 1732375670851, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:50,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=159 2024-11-23T15:26:50,861 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:50,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51034 deadline: 1732375670860, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:50,864 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:50,864 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:50,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51068 deadline: 1732375670862, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:50,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51004 deadline: 1732375670862, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:50,865 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:26:50,865 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=160 2024-11-23T15:26:50,865 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d. 2024-11-23T15:26:50,865 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d. as already flushing 2024-11-23T15:26:50,865 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d. 2024-11-23T15:26:50,865 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] handler.RSProcedureHandler(58): pid=160 java.io.IOException: Unable to complete flush {ENCODED => d02fcaab8ded3270abaa685bc6ecbc1d, NAME => 'TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:50,866 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=160 java.io.IOException: Unable to complete flush {ENCODED => d02fcaab8ded3270abaa685bc6ecbc1d, NAME => 'TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:50,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=160 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d02fcaab8ded3270abaa685bc6ecbc1d, NAME => 'TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d02fcaab8ded3270abaa685bc6ecbc1d, NAME => 'TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:50,962 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:50,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51048 deadline: 1732375670961, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:51,017 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:26:51,018 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=160 2024-11-23T15:26:51,018 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d. 2024-11-23T15:26:51,018 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d. as already flushing 2024-11-23T15:26:51,018 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d. 2024-11-23T15:26:51,018 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] handler.RSProcedureHandler(58): pid=160 java.io.IOException: Unable to complete flush {ENCODED => d02fcaab8ded3270abaa685bc6ecbc1d, NAME => 'TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T15:26:51,018 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=160 java.io.IOException: Unable to complete flush {ENCODED => d02fcaab8ded3270abaa685bc6ecbc1d, NAME => 'TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:51,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=160 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d02fcaab8ded3270abaa685bc6ecbc1d, NAME => 'TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d02fcaab8ded3270abaa685bc6ecbc1d, NAME => 'TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:51,043 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:51,047 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241123d91e2e99e04944fc87bf9f50454087f8_d02fcaab8ded3270abaa685bc6ecbc1d to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123d91e2e99e04944fc87bf9f50454087f8_d02fcaab8ded3270abaa685bc6ecbc1d 2024-11-23T15:26:51,047 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/A/7059af4257b24aab8cf6f852bcc9977a, store: [table=TestAcidGuarantees family=A region=d02fcaab8ded3270abaa685bc6ecbc1d] 2024-11-23T15:26:51,048 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/A/7059af4257b24aab8cf6f852bcc9977a is 175, key is test_row_0/A:col10/1732375608727/Put/seqid=0 2024-11-23T15:26:51,052 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742328_1504 (size=30955) 2024-11-23T15:26:51,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=159 2024-11-23T15:26:51,170 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:26:51,171 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=160 2024-11-23T15:26:51,171 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d. 2024-11-23T15:26:51,171 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d. as already flushing 2024-11-23T15:26:51,171 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d. 
2024-11-23T15:26:51,171 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] handler.RSProcedureHandler(58): pid=160 java.io.IOException: Unable to complete flush {ENCODED => d02fcaab8ded3270abaa685bc6ecbc1d, NAME => 'TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:51,171 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=160 java.io.IOException: Unable to complete flush {ENCODED => d02fcaab8ded3270abaa685bc6ecbc1d, NAME => 'TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:51,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=160 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d02fcaab8ded3270abaa685bc6ecbc1d, NAME => 'TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d02fcaab8ded3270abaa685bc6ecbc1d, NAME => 'TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:51,265 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:51,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51048 deadline: 1732375671264, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:51,323 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:26:51,323 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=160 2024-11-23T15:26:51,323 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d. 2024-11-23T15:26:51,323 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d. as already flushing 2024-11-23T15:26:51,323 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d. 2024-11-23T15:26:51,323 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] handler.RSProcedureHandler(58): pid=160 java.io.IOException: Unable to complete flush {ENCODED => d02fcaab8ded3270abaa685bc6ecbc1d, NAME => 'TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T15:26:51,324 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=160 java.io.IOException: Unable to complete flush {ENCODED => d02fcaab8ded3270abaa685bc6ecbc1d, NAME => 'TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:51,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=160 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d02fcaab8ded3270abaa685bc6ecbc1d, NAME => 'TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d02fcaab8ded3270abaa685bc6ecbc1d, NAME => 'TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:51,452 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=81, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/A/7059af4257b24aab8cf6f852bcc9977a 2024-11-23T15:26:51,458 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/B/ff4310f88c924b05bf4bdb774ae6e6eb is 50, key is test_row_0/B:col10/1732375608727/Put/seqid=0 2024-11-23T15:26:51,462 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742329_1505 (size=12001) 2024-11-23T15:26:51,475 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:26:51,476 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=160 2024-11-23T15:26:51,476 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d. 2024-11-23T15:26:51,476 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d. as already flushing 2024-11-23T15:26:51,476 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d. 2024-11-23T15:26:51,476 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] handler.RSProcedureHandler(58): pid=160 java.io.IOException: Unable to complete flush {ENCODED => d02fcaab8ded3270abaa685bc6ecbc1d, NAME => 'TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T15:26:51,476 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=160 java.io.IOException: Unable to complete flush {ENCODED => d02fcaab8ded3270abaa685bc6ecbc1d, NAME => 'TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:51,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=160 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d02fcaab8ded3270abaa685bc6ecbc1d, NAME => 'TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d02fcaab8ded3270abaa685bc6ecbc1d, NAME => 'TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:51,628 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:26:51,629 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=160 2024-11-23T15:26:51,629 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d. 2024-11-23T15:26:51,629 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d. as already flushing 2024-11-23T15:26:51,629 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d. 2024-11-23T15:26:51,629 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] handler.RSProcedureHandler(58): pid=160 java.io.IOException: Unable to complete flush {ENCODED => d02fcaab8ded3270abaa685bc6ecbc1d, NAME => 'TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:51,629 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=160 java.io.IOException: Unable to complete flush {ENCODED => d02fcaab8ded3270abaa685bc6ecbc1d, NAME => 'TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:51,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=160 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d02fcaab8ded3270abaa685bc6ecbc1d, NAME => 'TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d02fcaab8ded3270abaa685bc6ecbc1d, NAME => 'TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:51,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=159 2024-11-23T15:26:51,769 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:51,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51048 deadline: 1732375671769, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:51,781 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:26:51,781 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=160 2024-11-23T15:26:51,781 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d. 2024-11-23T15:26:51,782 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d. as already flushing 2024-11-23T15:26:51,782 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d. 2024-11-23T15:26:51,782 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] handler.RSProcedureHandler(58): pid=160 java.io.IOException: Unable to complete flush {ENCODED => d02fcaab8ded3270abaa685bc6ecbc1d, NAME => 'TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:51,782 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=160 java.io.IOException: Unable to complete flush {ENCODED => d02fcaab8ded3270abaa685bc6ecbc1d, NAME => 'TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:51,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=160 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d02fcaab8ded3270abaa685bc6ecbc1d, NAME => 'TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d02fcaab8ded3270abaa685bc6ecbc1d, NAME => 'TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:51,862 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=81 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/B/ff4310f88c924b05bf4bdb774ae6e6eb 2024-11-23T15:26:51,868 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/C/db0f0d73cb0b4caa9d80aebf4497d5b9 is 50, key is test_row_0/C:col10/1732375608727/Put/seqid=0 2024-11-23T15:26:51,882 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742330_1506 (size=12001) 2024-11-23T15:26:51,934 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:26:51,934 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=160 2024-11-23T15:26:51,934 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d. 2024-11-23T15:26:51,934 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d. as already flushing 2024-11-23T15:26:51,934 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d. 2024-11-23T15:26:51,935 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] handler.RSProcedureHandler(58): pid=160 java.io.IOException: Unable to complete flush {ENCODED => d02fcaab8ded3270abaa685bc6ecbc1d, NAME => 'TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:51,935 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=160 java.io.IOException: Unable to complete flush {ENCODED => d02fcaab8ded3270abaa685bc6ecbc1d, NAME => 'TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:51,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=160 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d02fcaab8ded3270abaa685bc6ecbc1d, NAME => 'TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d02fcaab8ded3270abaa685bc6ecbc1d, NAME => 'TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:52,087 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:26:52,087 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=160 2024-11-23T15:26:52,087 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d. 2024-11-23T15:26:52,087 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d. as already flushing 2024-11-23T15:26:52,087 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d. 2024-11-23T15:26:52,087 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] handler.RSProcedureHandler(58): pid=160 java.io.IOException: Unable to complete flush {ENCODED => d02fcaab8ded3270abaa685bc6ecbc1d, NAME => 'TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:52,088 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=160 java.io.IOException: Unable to complete flush {ENCODED => d02fcaab8ded3270abaa685bc6ecbc1d, NAME => 'TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:52,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=160 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d02fcaab8ded3270abaa685bc6ecbc1d, NAME => 'TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d02fcaab8ded3270abaa685bc6ecbc1d, NAME => 'TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:52,239 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:26:52,240 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=160 2024-11-23T15:26:52,240 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d. 2024-11-23T15:26:52,240 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d. as already flushing 2024-11-23T15:26:52,240 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d. 2024-11-23T15:26:52,240 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] handler.RSProcedureHandler(58): pid=160 java.io.IOException: Unable to complete flush {ENCODED => d02fcaab8ded3270abaa685bc6ecbc1d, NAME => 'TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:52,240 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=160 java.io.IOException: Unable to complete flush {ENCODED => d02fcaab8ded3270abaa685bc6ecbc1d, NAME => 'TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:52,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=160 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d02fcaab8ded3270abaa685bc6ecbc1d, NAME => 'TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d02fcaab8ded3270abaa685bc6ecbc1d, NAME => 'TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:52,283 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=81 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/C/db0f0d73cb0b4caa9d80aebf4497d5b9 2024-11-23T15:26:52,287 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/A/7059af4257b24aab8cf6f852bcc9977a as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/A/7059af4257b24aab8cf6f852bcc9977a 2024-11-23T15:26:52,291 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/A/7059af4257b24aab8cf6f852bcc9977a, entries=150, sequenceid=81, filesize=30.2 K 2024-11-23T15:26:52,291 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/B/ff4310f88c924b05bf4bdb774ae6e6eb as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/B/ff4310f88c924b05bf4bdb774ae6e6eb 2024-11-23T15:26:52,295 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/B/ff4310f88c924b05bf4bdb774ae6e6eb, entries=150, sequenceid=81, 
filesize=11.7 K 2024-11-23T15:26:52,295 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/C/db0f0d73cb0b4caa9d80aebf4497d5b9 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/C/db0f0d73cb0b4caa9d80aebf4497d5b9 2024-11-23T15:26:52,298 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/C/db0f0d73cb0b4caa9d80aebf4497d5b9, entries=150, sequenceid=81, filesize=11.7 K 2024-11-23T15:26:52,299 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for d02fcaab8ded3270abaa685bc6ecbc1d in 1666ms, sequenceid=81, compaction requested=false 2024-11-23T15:26:52,299 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d02fcaab8ded3270abaa685bc6ecbc1d: 2024-11-23T15:26:52,392 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:26:52,392 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=160 2024-11-23T15:26:52,392 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d. 
2024-11-23T15:26:52,393 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegion(2837): Flushing d02fcaab8ded3270abaa685bc6ecbc1d 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-23T15:26:52,393 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d02fcaab8ded3270abaa685bc6ecbc1d, store=A 2024-11-23T15:26:52,393 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:26:52,393 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d02fcaab8ded3270abaa685bc6ecbc1d, store=B 2024-11-23T15:26:52,393 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:26:52,393 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d02fcaab8ded3270abaa685bc6ecbc1d, store=C 2024-11-23T15:26:52,393 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:26:52,398 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411239ab7bf137eef4b59a5bb4397228000be_d02fcaab8ded3270abaa685bc6ecbc1d is 50, key is test_row_0/A:col10/1732375610646/Put/seqid=0 2024-11-23T15:26:52,402 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742331_1507 (size=12154) 2024-11-23T15:26:52,402 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:52,405 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411239ab7bf137eef4b59a5bb4397228000be_d02fcaab8ded3270abaa685bc6ecbc1d to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411239ab7bf137eef4b59a5bb4397228000be_d02fcaab8ded3270abaa685bc6ecbc1d 2024-11-23T15:26:52,406 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/A/197224406483403aa537a735f53ca20b, store: [table=TestAcidGuarantees family=A region=d02fcaab8ded3270abaa685bc6ecbc1d] 2024-11-23T15:26:52,406 
DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/A/197224406483403aa537a735f53ca20b is 175, key is test_row_0/A:col10/1732375610646/Put/seqid=0 2024-11-23T15:26:52,409 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742332_1508 (size=30955) 2024-11-23T15:26:52,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=159 2024-11-23T15:26:52,782 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d. as already flushing 2024-11-23T15:26:52,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(8581): Flush requested on d02fcaab8ded3270abaa685bc6ecbc1d 2024-11-23T15:26:52,810 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=93, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/A/197224406483403aa537a735f53ca20b 2024-11-23T15:26:52,818 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/B/83fbda0e71604c71aa22d525dcfd799d is 50, key is test_row_0/B:col10/1732375610646/Put/seqid=0 2024-11-23T15:26:52,822 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742333_1509 (size=12001) 2024-11-23T15:26:52,823 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/B/83fbda0e71604c71aa22d525dcfd799d 2024-11-23T15:26:52,830 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/C/2c2f49420add4d6e9976452ae43b3b1e is 50, key is test_row_0/C:col10/1732375610646/Put/seqid=0 2024-11-23T15:26:52,835 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742334_1510 (size=12001) 2024-11-23T15:26:52,843 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:52,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51048 deadline: 1732375672841, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:52,857 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:52,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51018 deadline: 1732375672855, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:52,858 DEBUG [Thread-2187 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4123 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d., hostname=6a36843bf905,33811,1732375456985, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-23T15:26:52,871 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:52,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51004 deadline: 1732375672870, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:52,872 DEBUG [Thread-2181 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4136 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d., hostname=6a36843bf905,33811,1732375456985, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-23T15:26:52,877 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:52,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51034 deadline: 1732375672877, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:52,878 DEBUG [Thread-2185 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4144 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d., hostname=6a36843bf905,33811,1732375456985, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at 
org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at 
org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-23T15:26:52,884 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:52,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51068 deadline: 1732375672883, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:52,884 DEBUG [Thread-2183 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4149 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d., hostname=6a36843bf905,33811,1732375456985, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at 
org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-23T15:26:52,945 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:52,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51048 deadline: 1732375672944, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:53,147 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:53,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51048 deadline: 1732375673146, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:53,236 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/C/2c2f49420add4d6e9976452ae43b3b1e 2024-11-23T15:26:53,240 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/A/197224406483403aa537a735f53ca20b as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/A/197224406483403aa537a735f53ca20b 2024-11-23T15:26:53,243 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/A/197224406483403aa537a735f53ca20b, entries=150, sequenceid=93, filesize=30.2 K 2024-11-23T15:26:53,244 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/B/83fbda0e71604c71aa22d525dcfd799d as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/B/83fbda0e71604c71aa22d525dcfd799d 2024-11-23T15:26:53,247 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/B/83fbda0e71604c71aa22d525dcfd799d, entries=150, sequenceid=93, filesize=11.7 K 2024-11-23T15:26:53,247 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/C/2c2f49420add4d6e9976452ae43b3b1e as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/C/2c2f49420add4d6e9976452ae43b3b1e 2024-11-23T15:26:53,250 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/C/2c2f49420add4d6e9976452ae43b3b1e, entries=150, sequenceid=93, filesize=11.7 K 2024-11-23T15:26:53,251 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for d02fcaab8ded3270abaa685bc6ecbc1d in 859ms, sequenceid=93, compaction requested=true 2024-11-23T15:26:53,251 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegion(2538): Flush status journal for d02fcaab8ded3270abaa685bc6ecbc1d: 2024-11-23T15:26:53,251 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d. 2024-11-23T15:26:53,251 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=160 2024-11-23T15:26:53,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4106): Remote procedure done, pid=160 2024-11-23T15:26:53,254 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=160, resume processing ppid=159 2024-11-23T15:26:53,254 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=160, ppid=159, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.6910 sec 2024-11-23T15:26:53,255 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=159, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=159, table=TestAcidGuarantees in 2.6960 sec 2024-11-23T15:26:53,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(8581): Flush requested on d02fcaab8ded3270abaa685bc6ecbc1d 2024-11-23T15:26:53,450 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d02fcaab8ded3270abaa685bc6ecbc1d 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-23T15:26:53,450 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d02fcaab8ded3270abaa685bc6ecbc1d, store=A 2024-11-23T15:26:53,450 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:26:53,450 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d02fcaab8ded3270abaa685bc6ecbc1d, store=B 2024-11-23T15:26:53,450 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:26:53,450 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
d02fcaab8ded3270abaa685bc6ecbc1d, store=C 2024-11-23T15:26:53,450 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:26:53,456 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411233d62c4f4936e4179b815be5fdb4f3718_d02fcaab8ded3270abaa685bc6ecbc1d is 50, key is test_row_0/A:col10/1732375612835/Put/seqid=0 2024-11-23T15:26:53,470 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742335_1511 (size=14594) 2024-11-23T15:26:53,470 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:53,473 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411233d62c4f4936e4179b815be5fdb4f3718_d02fcaab8ded3270abaa685bc6ecbc1d to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411233d62c4f4936e4179b815be5fdb4f3718_d02fcaab8ded3270abaa685bc6ecbc1d 2024-11-23T15:26:53,474 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/A/0b03c46e332c4fcdacb12faec47226bc, store: [table=TestAcidGuarantees family=A region=d02fcaab8ded3270abaa685bc6ecbc1d] 2024-11-23T15:26:53,475 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/A/0b03c46e332c4fcdacb12faec47226bc is 175, key is test_row_0/A:col10/1732375612835/Put/seqid=0 2024-11-23T15:26:53,485 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:53,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51048 deadline: 1732375673482, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:53,486 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742336_1512 (size=39549) 2024-11-23T15:26:53,587 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:53,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51048 deadline: 1732375673586, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:53,789 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:53,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51048 deadline: 1732375673788, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:53,887 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=118, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/A/0b03c46e332c4fcdacb12faec47226bc 2024-11-23T15:26:53,893 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/B/7cdf2fab64944efbbd226f7b210d4027 is 50, key is test_row_0/B:col10/1732375612835/Put/seqid=0 2024-11-23T15:26:53,898 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742337_1513 (size=12001) 2024-11-23T15:26:53,899 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=118 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/B/7cdf2fab64944efbbd226f7b210d4027 2024-11-23T15:26:53,905 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/C/25b2655a548b4782b46d33d829a9fb17 is 50, key is test_row_0/C:col10/1732375612835/Put/seqid=0 2024-11-23T15:26:53,919 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742338_1514 (size=12001) 2024-11-23T15:26:54,092 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:54,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51048 deadline: 1732375674090, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:54,319 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=118 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/C/25b2655a548b4782b46d33d829a9fb17 2024-11-23T15:26:54,323 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/A/0b03c46e332c4fcdacb12faec47226bc as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/A/0b03c46e332c4fcdacb12faec47226bc 2024-11-23T15:26:54,327 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/A/0b03c46e332c4fcdacb12faec47226bc, entries=200, sequenceid=118, filesize=38.6 K 2024-11-23T15:26:54,328 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/B/7cdf2fab64944efbbd226f7b210d4027 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/B/7cdf2fab64944efbbd226f7b210d4027 2024-11-23T15:26:54,331 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/B/7cdf2fab64944efbbd226f7b210d4027, entries=150, sequenceid=118, filesize=11.7 K 2024-11-23T15:26:54,332 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/C/25b2655a548b4782b46d33d829a9fb17 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/C/25b2655a548b4782b46d33d829a9fb17 2024-11-23T15:26:54,335 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/C/25b2655a548b4782b46d33d829a9fb17, entries=150, sequenceid=118, filesize=11.7 K 2024-11-23T15:26:54,336 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for d02fcaab8ded3270abaa685bc6ecbc1d in 886ms, sequenceid=118, compaction requested=true 2024-11-23T15:26:54,336 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d02fcaab8ded3270abaa685bc6ecbc1d: 2024-11-23T15:26:54,336 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-23T15:26:54,336 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d02fcaab8ded3270abaa685bc6ecbc1d:A, priority=-2147483648, current under compaction store size is 1 2024-11-23T15:26:54,337 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T15:26:54,337 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-23T15:26:54,337 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d02fcaab8ded3270abaa685bc6ecbc1d:B, priority=-2147483648, current under compaction store size is 2 2024-11-23T15:26:54,337 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T15:26:54,337 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d02fcaab8ded3270abaa685bc6ecbc1d:C, priority=-2147483648, current under compaction store size is 3 2024-11-23T15:26:54,337 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T15:26:54,337 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 132517 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-23T15:26:54,337 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HStore(1540): d02fcaab8ded3270abaa685bc6ecbc1d/A is initiating minor 
compaction (all files) 2024-11-23T15:26:54,337 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d02fcaab8ded3270abaa685bc6ecbc1d/A in TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d. 2024-11-23T15:26:54,338 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/A/af06d13b37c14ae5baeed14bb2eb6fdf, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/A/7059af4257b24aab8cf6f852bcc9977a, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/A/197224406483403aa537a735f53ca20b, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/A/0b03c46e332c4fcdacb12faec47226bc] into tmpdir=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp, totalSize=129.4 K 2024-11-23T15:26:54,338 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d. 2024-11-23T15:26:54,338 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48107 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-23T15:26:54,338 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d. files: [hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/A/af06d13b37c14ae5baeed14bb2eb6fdf, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/A/7059af4257b24aab8cf6f852bcc9977a, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/A/197224406483403aa537a735f53ca20b, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/A/0b03c46e332c4fcdacb12faec47226bc] 2024-11-23T15:26:54,338 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1540): d02fcaab8ded3270abaa685bc6ecbc1d/B is initiating minor compaction (all files) 2024-11-23T15:26:54,338 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d02fcaab8ded3270abaa685bc6ecbc1d/B in TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d. 
2024-11-23T15:26:54,338 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/B/b83522acadd44acf82c8a28b8da64ff7, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/B/ff4310f88c924b05bf4bdb774ae6e6eb, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/B/83fbda0e71604c71aa22d525dcfd799d, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/B/7cdf2fab64944efbbd226f7b210d4027] into tmpdir=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp, totalSize=47.0 K 2024-11-23T15:26:54,338 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.Compactor(224): Compacting af06d13b37c14ae5baeed14bb2eb6fdf, keycount=150, bloomtype=ROW, size=30.3 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1732375608606 2024-11-23T15:26:54,338 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting b83522acadd44acf82c8a28b8da64ff7, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1732375608606 2024-11-23T15:26:54,338 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7059af4257b24aab8cf6f852bcc9977a, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=81, earliestPutTs=1732375608727 2024-11-23T15:26:54,339 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting ff4310f88c924b05bf4bdb774ae6e6eb, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=81, earliestPutTs=1732375608727 2024-11-23T15:26:54,339 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 197224406483403aa537a735f53ca20b, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=93, earliestPutTs=1732375610646 2024-11-23T15:26:54,339 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting 83fbda0e71604c71aa22d525dcfd799d, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=93, earliestPutTs=1732375610646 2024-11-23T15:26:54,339 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0b03c46e332c4fcdacb12faec47226bc, keycount=200, bloomtype=ROW, size=38.6 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1732375612835 2024-11-23T15:26:54,339 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting 7cdf2fab64944efbbd226f7b210d4027, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1732375612835 2024-11-23T15:26:54,348 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d02fcaab8ded3270abaa685bc6ecbc1d#B#compaction#437 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T15:26:54,349 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/B/590e0c43d941418c8be6fbaa69cd8813 is 50, key is test_row_0/B:col10/1732375612835/Put/seqid=0 2024-11-23T15:26:54,350 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=d02fcaab8ded3270abaa685bc6ecbc1d] 2024-11-23T15:26:54,358 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024112390a50aeba93c4bc2a664dda986a43e89_d02fcaab8ded3270abaa685bc6ecbc1d store=[table=TestAcidGuarantees family=A region=d02fcaab8ded3270abaa685bc6ecbc1d] 2024-11-23T15:26:54,360 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024112390a50aeba93c4bc2a664dda986a43e89_d02fcaab8ded3270abaa685bc6ecbc1d, store=[table=TestAcidGuarantees family=A region=d02fcaab8ded3270abaa685bc6ecbc1d] 2024-11-23T15:26:54,361 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112390a50aeba93c4bc2a664dda986a43e89_d02fcaab8ded3270abaa685bc6ecbc1d because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=d02fcaab8ded3270abaa685bc6ecbc1d] 2024-11-23T15:26:54,365 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742339_1515 (size=12241) 2024-11-23T15:26:54,370 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/B/590e0c43d941418c8be6fbaa69cd8813 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/B/590e0c43d941418c8be6fbaa69cd8813 2024-11-23T15:26:54,374 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in d02fcaab8ded3270abaa685bc6ecbc1d/B of d02fcaab8ded3270abaa685bc6ecbc1d into 590e0c43d941418c8be6fbaa69cd8813(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T15:26:54,374 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d02fcaab8ded3270abaa685bc6ecbc1d: 2024-11-23T15:26:54,374 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d., storeName=d02fcaab8ded3270abaa685bc6ecbc1d/B, priority=12, startTime=1732375614337; duration=0sec 2024-11-23T15:26:54,374 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T15:26:54,374 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d02fcaab8ded3270abaa685bc6ecbc1d:B 2024-11-23T15:26:54,375 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-23T15:26:54,377 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48107 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-23T15:26:54,377 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1540): d02fcaab8ded3270abaa685bc6ecbc1d/C is initiating minor compaction (all files) 2024-11-23T15:26:54,377 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d02fcaab8ded3270abaa685bc6ecbc1d/C in TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d. 2024-11-23T15:26:54,377 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/C/43f5fc425eb643b8bce2b829e1a8ecba, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/C/db0f0d73cb0b4caa9d80aebf4497d5b9, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/C/2c2f49420add4d6e9976452ae43b3b1e, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/C/25b2655a548b4782b46d33d829a9fb17] into tmpdir=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp, totalSize=47.0 K 2024-11-23T15:26:54,377 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting 43f5fc425eb643b8bce2b829e1a8ecba, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1732375608606 2024-11-23T15:26:54,378 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting db0f0d73cb0b4caa9d80aebf4497d5b9, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=81, earliestPutTs=1732375608727 2024-11-23T15:26:54,378 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting 2c2f49420add4d6e9976452ae43b3b1e, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, 
compression=NONE, seqNum=93, earliestPutTs=1732375610646 2024-11-23T15:26:54,379 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting 25b2655a548b4782b46d33d829a9fb17, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1732375612835 2024-11-23T15:26:54,382 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742340_1516 (size=4469) 2024-11-23T15:26:54,384 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d02fcaab8ded3270abaa685bc6ecbc1d#A#compaction#438 average throughput is 0.72 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T15:26:54,385 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/A/9b6458285d844c05b1cdc109ff9f9955 is 175, key is test_row_0/A:col10/1732375612835/Put/seqid=0 2024-11-23T15:26:54,388 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d02fcaab8ded3270abaa685bc6ecbc1d#C#compaction#439 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T15:26:54,389 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/C/15c281dea3fd45e0bebed93c544c1827 is 50, key is test_row_0/C:col10/1732375612835/Put/seqid=0 2024-11-23T15:26:54,390 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742341_1517 (size=31195) 2024-11-23T15:26:54,410 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742342_1518 (size=12241) 2024-11-23T15:26:54,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(8581): Flush requested on d02fcaab8ded3270abaa685bc6ecbc1d 2024-11-23T15:26:54,597 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d02fcaab8ded3270abaa685bc6ecbc1d 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-23T15:26:54,597 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d02fcaab8ded3270abaa685bc6ecbc1d, store=A 2024-11-23T15:26:54,598 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:26:54,598 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d02fcaab8ded3270abaa685bc6ecbc1d, store=B 2024-11-23T15:26:54,598 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:26:54,598 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d02fcaab8ded3270abaa685bc6ecbc1d, store=C 2024-11-23T15:26:54,598 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:26:54,603 DEBUG [MemStoreFlusher.0 {}] 
hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112389f9dc607588482b911c74cff84b2fb3_d02fcaab8ded3270abaa685bc6ecbc1d is 50, key is test_row_0/A:col10/1732375613476/Put/seqid=0 2024-11-23T15:26:54,607 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742343_1519 (size=12204) 2024-11-23T15:26:54,663 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:54,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51048 deadline: 1732375674661, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:54,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=159 2024-11-23T15:26:54,664 INFO [Thread-2189 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 159 completed 2024-11-23T15:26:54,665 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-23T15:26:54,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] procedure2.ProcedureExecutor(1098): Stored pid=161, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=161, table=TestAcidGuarantees 2024-11-23T15:26:54,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=161 2024-11-23T15:26:54,666 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=161, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=161, 
table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-23T15:26:54,667 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=161, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=161, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-23T15:26:54,667 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=162, ppid=161, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-23T15:26:54,764 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:54,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51048 deadline: 1732375674764, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:54,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=161 2024-11-23T15:26:54,794 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/A/9b6458285d844c05b1cdc109ff9f9955 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/A/9b6458285d844c05b1cdc109ff9f9955 2024-11-23T15:26:54,806 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in d02fcaab8ded3270abaa685bc6ecbc1d/A of d02fcaab8ded3270abaa685bc6ecbc1d into 9b6458285d844c05b1cdc109ff9f9955(size=30.5 K), total size for store is 30.5 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T15:26:54,806 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d02fcaab8ded3270abaa685bc6ecbc1d: 2024-11-23T15:26:54,806 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d., storeName=d02fcaab8ded3270abaa685bc6ecbc1d/A, priority=12, startTime=1732375614336; duration=0sec 2024-11-23T15:26:54,807 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T15:26:54,807 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d02fcaab8ded3270abaa685bc6ecbc1d:A 2024-11-23T15:26:54,814 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/C/15c281dea3fd45e0bebed93c544c1827 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/C/15c281dea3fd45e0bebed93c544c1827 2024-11-23T15:26:54,817 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in d02fcaab8ded3270abaa685bc6ecbc1d/C of d02fcaab8ded3270abaa685bc6ecbc1d into 15c281dea3fd45e0bebed93c544c1827(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T15:26:54,817 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d02fcaab8ded3270abaa685bc6ecbc1d: 2024-11-23T15:26:54,818 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d., storeName=d02fcaab8ded3270abaa685bc6ecbc1d/C, priority=12, startTime=1732375614337; duration=0sec 2024-11-23T15:26:54,818 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T15:26:54,818 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d02fcaab8ded3270abaa685bc6ecbc1d:C 2024-11-23T15:26:54,818 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:26:54,819 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=162 2024-11-23T15:26:54,819 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d. 2024-11-23T15:26:54,819 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d. 
as already flushing 2024-11-23T15:26:54,819 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d. 2024-11-23T15:26:54,819 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] handler.RSProcedureHandler(58): pid=162 java.io.IOException: Unable to complete flush {ENCODED => d02fcaab8ded3270abaa685bc6ecbc1d, NAME => 'TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:54,819 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=162 java.io.IOException: Unable to complete flush {ENCODED => d02fcaab8ded3270abaa685bc6ecbc1d, NAME => 'TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:54,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=162 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d02fcaab8ded3270abaa685bc6ecbc1d, NAME => 'TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d02fcaab8ded3270abaa685bc6ecbc1d, NAME => 'TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:54,966 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:54,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51048 deadline: 1732375674965, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:54,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=161 2024-11-23T15:26:54,971 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:26:54,971 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=162 2024-11-23T15:26:54,972 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d. 2024-11-23T15:26:54,972 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d. as already flushing 2024-11-23T15:26:54,972 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d. 2024-11-23T15:26:54,972 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] handler.RSProcedureHandler(58): pid=162 java.io.IOException: Unable to complete flush {ENCODED => d02fcaab8ded3270abaa685bc6ecbc1d, NAME => 'TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T15:26:54,972 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=162 java.io.IOException: Unable to complete flush {ENCODED => d02fcaab8ded3270abaa685bc6ecbc1d, NAME => 'TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:54,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=162 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d02fcaab8ded3270abaa685bc6ecbc1d, NAME => 'TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d02fcaab8ded3270abaa685bc6ecbc1d, NAME => 'TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:55,007 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:55,010 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112389f9dc607588482b911c74cff84b2fb3_d02fcaab8ded3270abaa685bc6ecbc1d to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112389f9dc607588482b911c74cff84b2fb3_d02fcaab8ded3270abaa685bc6ecbc1d 2024-11-23T15:26:55,011 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/A/a90ac3a958fa46e896edff57e00108f7, store: [table=TestAcidGuarantees family=A region=d02fcaab8ded3270abaa685bc6ecbc1d] 2024-11-23T15:26:55,012 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/A/a90ac3a958fa46e896edff57e00108f7 is 175, key is test_row_0/A:col10/1732375613476/Put/seqid=0 2024-11-23T15:26:55,016 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742344_1520 (size=31005) 2024-11-23T15:26:55,124 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:26:55,124 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=162 2024-11-23T15:26:55,124 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d. 2024-11-23T15:26:55,124 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d. as already flushing 2024-11-23T15:26:55,124 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d. 2024-11-23T15:26:55,124 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] handler.RSProcedureHandler(58): pid=162 java.io.IOException: Unable to complete flush {ENCODED => d02fcaab8ded3270abaa685bc6ecbc1d, NAME => 'TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:55,125 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=162 java.io.IOException: Unable to complete flush {ENCODED => d02fcaab8ded3270abaa685bc6ecbc1d, NAME => 'TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:55,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=162 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d02fcaab8ded3270abaa685bc6ecbc1d, NAME => 'TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d02fcaab8ded3270abaa685bc6ecbc1d, NAME => 'TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:55,268 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:55,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51048 deadline: 1732375675267, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:55,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=161 2024-11-23T15:26:55,276 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:26:55,277 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=162 2024-11-23T15:26:55,277 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d. 2024-11-23T15:26:55,277 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d. as already flushing 2024-11-23T15:26:55,277 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d. 2024-11-23T15:26:55,277 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] handler.RSProcedureHandler(58): pid=162 java.io.IOException: Unable to complete flush {ENCODED => d02fcaab8ded3270abaa685bc6ecbc1d, NAME => 'TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T15:26:55,277 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=162 java.io.IOException: Unable to complete flush {ENCODED => d02fcaab8ded3270abaa685bc6ecbc1d, NAME => 'TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:55,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=162 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d02fcaab8ded3270abaa685bc6ecbc1d, NAME => 'TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d02fcaab8ded3270abaa685bc6ecbc1d, NAME => 'TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:55,417 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=131, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/A/a90ac3a958fa46e896edff57e00108f7 2024-11-23T15:26:55,424 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/B/af57329485fe41c09cbf66f920caa2cc is 50, key is test_row_0/B:col10/1732375613476/Put/seqid=0 2024-11-23T15:26:55,427 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742345_1521 (size=12051) 2024-11-23T15:26:55,429 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:26:55,429 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=162 2024-11-23T15:26:55,430 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d. 2024-11-23T15:26:55,430 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d. as already flushing 2024-11-23T15:26:55,430 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d. 2024-11-23T15:26:55,430 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] handler.RSProcedureHandler(58): pid=162 java.io.IOException: Unable to complete flush {ENCODED => d02fcaab8ded3270abaa685bc6ecbc1d, NAME => 'TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T15:26:55,430 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=162 java.io.IOException: Unable to complete flush {ENCODED => d02fcaab8ded3270abaa685bc6ecbc1d, NAME => 'TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:55,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=162 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d02fcaab8ded3270abaa685bc6ecbc1d, NAME => 'TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d02fcaab8ded3270abaa685bc6ecbc1d, NAME => 'TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:55,582 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:26:55,582 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=162 2024-11-23T15:26:55,582 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d. 2024-11-23T15:26:55,582 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d. as already flushing 2024-11-23T15:26:55,582 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d. 2024-11-23T15:26:55,582 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] handler.RSProcedureHandler(58): pid=162 java.io.IOException: Unable to complete flush {ENCODED => d02fcaab8ded3270abaa685bc6ecbc1d, NAME => 'TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:55,582 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=162 java.io.IOException: Unable to complete flush {ENCODED => d02fcaab8ded3270abaa685bc6ecbc1d, NAME => 'TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:55,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=162 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d02fcaab8ded3270abaa685bc6ecbc1d, NAME => 'TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d02fcaab8ded3270abaa685bc6ecbc1d, NAME => 'TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:55,734 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:26:55,735 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=162 2024-11-23T15:26:55,735 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d. 
2024-11-23T15:26:55,735 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d. as already flushing 2024-11-23T15:26:55,735 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d. 2024-11-23T15:26:55,735 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] handler.RSProcedureHandler(58): pid=162 java.io.IOException: Unable to complete flush {ENCODED => d02fcaab8ded3270abaa685bc6ecbc1d, NAME => 'TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:55,735 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=162 java.io.IOException: Unable to complete flush {ENCODED => d02fcaab8ded3270abaa685bc6ecbc1d, NAME => 'TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T15:26:55,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=162 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d02fcaab8ded3270abaa685bc6ecbc1d, NAME => 'TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d02fcaab8ded3270abaa685bc6ecbc1d, NAME => 'TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:55,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=161 2024-11-23T15:26:55,774 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:55,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51048 deadline: 1732375675772, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:55,828 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=131 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/B/af57329485fe41c09cbf66f920caa2cc 2024-11-23T15:26:55,834 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/C/f56e16dbe1904944a5749768dcdd1ea6 is 50, key is test_row_0/C:col10/1732375613476/Put/seqid=0 2024-11-23T15:26:55,845 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742346_1522 (size=12051) 2024-11-23T15:26:55,887 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:26:55,887 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=162 2024-11-23T15:26:55,887 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d. 2024-11-23T15:26:55,887 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d. as already flushing 2024-11-23T15:26:55,888 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d. 
2024-11-23T15:26:55,888 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] handler.RSProcedureHandler(58): pid=162 java.io.IOException: Unable to complete flush {ENCODED => d02fcaab8ded3270abaa685bc6ecbc1d, NAME => 'TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:55,888 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=162 java.io.IOException: Unable to complete flush {ENCODED => d02fcaab8ded3270abaa685bc6ecbc1d, NAME => 'TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:55,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=162 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d02fcaab8ded3270abaa685bc6ecbc1d, NAME => 'TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d02fcaab8ded3270abaa685bc6ecbc1d, NAME => 'TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:56,040 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:26:56,040 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=162 2024-11-23T15:26:56,040 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d. 2024-11-23T15:26:56,040 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d. as already flushing 2024-11-23T15:26:56,040 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d. 2024-11-23T15:26:56,040 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] handler.RSProcedureHandler(58): pid=162 java.io.IOException: Unable to complete flush {ENCODED => d02fcaab8ded3270abaa685bc6ecbc1d, NAME => 'TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:56,040 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=162 java.io.IOException: Unable to complete flush {ENCODED => d02fcaab8ded3270abaa685bc6ecbc1d, NAME => 'TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:56,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=162 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d02fcaab8ded3270abaa685bc6ecbc1d, NAME => 'TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d02fcaab8ded3270abaa685bc6ecbc1d, NAME => 'TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:56,192 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:26:56,192 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=162 2024-11-23T15:26:56,193 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d. 2024-11-23T15:26:56,193 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d. as already flushing 2024-11-23T15:26:56,193 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d. 2024-11-23T15:26:56,193 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] handler.RSProcedureHandler(58): pid=162 java.io.IOException: Unable to complete flush {ENCODED => d02fcaab8ded3270abaa685bc6ecbc1d, NAME => 'TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:56,193 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=162 java.io.IOException: Unable to complete flush {ENCODED => d02fcaab8ded3270abaa685bc6ecbc1d, NAME => 'TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:56,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=162 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d02fcaab8ded3270abaa685bc6ecbc1d, NAME => 'TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d02fcaab8ded3270abaa685bc6ecbc1d, NAME => 'TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:56,246 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=131 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/C/f56e16dbe1904944a5749768dcdd1ea6 2024-11-23T15:26:56,250 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/A/a90ac3a958fa46e896edff57e00108f7 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/A/a90ac3a958fa46e896edff57e00108f7 2024-11-23T15:26:56,254 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/A/a90ac3a958fa46e896edff57e00108f7, entries=150, sequenceid=131, filesize=30.3 K 2024-11-23T15:26:56,255 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/B/af57329485fe41c09cbf66f920caa2cc as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/B/af57329485fe41c09cbf66f920caa2cc 2024-11-23T15:26:56,258 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/B/af57329485fe41c09cbf66f920caa2cc, entries=150, 
sequenceid=131, filesize=11.8 K 2024-11-23T15:26:56,259 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/C/f56e16dbe1904944a5749768dcdd1ea6 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/C/f56e16dbe1904944a5749768dcdd1ea6 2024-11-23T15:26:56,262 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/C/f56e16dbe1904944a5749768dcdd1ea6, entries=150, sequenceid=131, filesize=11.8 K 2024-11-23T15:26:56,263 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for d02fcaab8ded3270abaa685bc6ecbc1d in 1666ms, sequenceid=131, compaction requested=false 2024-11-23T15:26:56,263 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d02fcaab8ded3270abaa685bc6ecbc1d: 2024-11-23T15:26:56,345 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:26:56,345 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=162 2024-11-23T15:26:56,345 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d. 
2024-11-23T15:26:56,346 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(2837): Flushing d02fcaab8ded3270abaa685bc6ecbc1d 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-23T15:26:56,346 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d02fcaab8ded3270abaa685bc6ecbc1d, store=A 2024-11-23T15:26:56,346 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:26:56,346 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d02fcaab8ded3270abaa685bc6ecbc1d, store=B 2024-11-23T15:26:56,346 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:26:56,346 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d02fcaab8ded3270abaa685bc6ecbc1d, store=C 2024-11-23T15:26:56,346 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:26:56,352 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411232a045e5f8beb466487e92d24f4b19334_d02fcaab8ded3270abaa685bc6ecbc1d is 50, key is test_row_0/A:col10/1732375614660/Put/seqid=0 2024-11-23T15:26:56,356 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742347_1523 (size=12304) 2024-11-23T15:26:56,356 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:56,360 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411232a045e5f8beb466487e92d24f4b19334_d02fcaab8ded3270abaa685bc6ecbc1d to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411232a045e5f8beb466487e92d24f4b19334_d02fcaab8ded3270abaa685bc6ecbc1d 2024-11-23T15:26:56,360 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/A/ffabb2b2dfee40f4841db0faa3d6b6f3, store: [table=TestAcidGuarantees family=A region=d02fcaab8ded3270abaa685bc6ecbc1d] 2024-11-23T15:26:56,361 
DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/A/ffabb2b2dfee40f4841db0faa3d6b6f3 is 175, key is test_row_0/A:col10/1732375614660/Put/seqid=0 2024-11-23T15:26:56,365 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742348_1524 (size=31105) 2024-11-23T15:26:56,365 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=157, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/A/ffabb2b2dfee40f4841db0faa3d6b6f3 2024-11-23T15:26:56,371 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/B/a914d376348541e0b17e9f2c02fd5322 is 50, key is test_row_0/B:col10/1732375614660/Put/seqid=0 2024-11-23T15:26:56,374 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742349_1525 (size=12151) 2024-11-23T15:26:56,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=161 2024-11-23T15:26:56,775 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=157 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/B/a914d376348541e0b17e9f2c02fd5322 2024-11-23T15:26:56,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(8581): Flush requested on d02fcaab8ded3270abaa685bc6ecbc1d 2024-11-23T15:26:56,778 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d. as already flushing 2024-11-23T15:26:56,782 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/C/c9298c0ac12840edb9ad2ff6cc93b8a6 is 50, key is test_row_0/C:col10/1732375614660/Put/seqid=0 2024-11-23T15:26:56,786 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742350_1526 (size=12151) 2024-11-23T15:26:56,797 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:56,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51048 deadline: 1732375676796, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:56,893 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:56,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51018 deadline: 1732375676892, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:56,894 DEBUG [Thread-2187 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8160 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d., hostname=6a36843bf905,33811,1732375456985, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-23T15:26:56,900 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:56,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51048 deadline: 1732375676899, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:56,902 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:56,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51034 deadline: 1732375676902, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:56,903 DEBUG [Thread-2185 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8169 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d., hostname=6a36843bf905,33811,1732375456985, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-23T15:26:56,910 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:56,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51004 deadline: 1732375676909, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:56,911 DEBUG [Thread-2181 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8174 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d., hostname=6a36843bf905,33811,1732375456985, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-23T15:26:56,913 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:56,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51068 deadline: 1732375676912, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:56,914 DEBUG [Thread-2183 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8179 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d., hostname=6a36843bf905,33811,1732375456985, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at 
org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at 
org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-23T15:26:57,101 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:57,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51048 deadline: 1732375677101, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:57,187 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=157 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/C/c9298c0ac12840edb9ad2ff6cc93b8a6 2024-11-23T15:26:57,191 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/A/ffabb2b2dfee40f4841db0faa3d6b6f3 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/A/ffabb2b2dfee40f4841db0faa3d6b6f3 2024-11-23T15:26:57,194 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/A/ffabb2b2dfee40f4841db0faa3d6b6f3, entries=150, sequenceid=157, filesize=30.4 K 2024-11-23T15:26:57,195 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/B/a914d376348541e0b17e9f2c02fd5322 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/B/a914d376348541e0b17e9f2c02fd5322 2024-11-23T15:26:57,198 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/B/a914d376348541e0b17e9f2c02fd5322, entries=150, sequenceid=157, filesize=11.9 K 2024-11-23T15:26:57,199 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/C/c9298c0ac12840edb9ad2ff6cc93b8a6 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/C/c9298c0ac12840edb9ad2ff6cc93b8a6 2024-11-23T15:26:57,202 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/C/c9298c0ac12840edb9ad2ff6cc93b8a6, entries=150, sequenceid=157, filesize=11.9 K 2024-11-23T15:26:57,203 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for d02fcaab8ded3270abaa685bc6ecbc1d in 857ms, sequenceid=157, compaction requested=true 2024-11-23T15:26:57,203 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(2538): Flush status journal for d02fcaab8ded3270abaa685bc6ecbc1d: 2024-11-23T15:26:57,203 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d. 
2024-11-23T15:26:57,203 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=162 2024-11-23T15:26:57,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4106): Remote procedure done, pid=162 2024-11-23T15:26:57,206 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=162, resume processing ppid=161 2024-11-23T15:26:57,206 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=162, ppid=161, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.5370 sec 2024-11-23T15:26:57,207 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=161, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=161, table=TestAcidGuarantees in 2.5410 sec 2024-11-23T15:26:57,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(8581): Flush requested on d02fcaab8ded3270abaa685bc6ecbc1d 2024-11-23T15:26:57,405 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d02fcaab8ded3270abaa685bc6ecbc1d 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-23T15:26:57,405 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d02fcaab8ded3270abaa685bc6ecbc1d, store=A 2024-11-23T15:26:57,405 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:26:57,406 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d02fcaab8ded3270abaa685bc6ecbc1d, store=B 2024-11-23T15:26:57,406 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:26:57,406 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d02fcaab8ded3270abaa685bc6ecbc1d, store=C 2024-11-23T15:26:57,406 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:26:57,413 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411238d577b2a587b4b7ebf35b85a4ac01fb5_d02fcaab8ded3270abaa685bc6ecbc1d is 50, key is test_row_0/A:col10/1732375616793/Put/seqid=0 2024-11-23T15:26:57,416 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742351_1527 (size=12304) 2024-11-23T15:26:57,463 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:57,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51048 deadline: 1732375677461, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:57,565 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:57,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51048 deadline: 1732375677564, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:57,766 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:57,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51048 deadline: 1732375677766, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:57,817 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:57,821 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411238d577b2a587b4b7ebf35b85a4ac01fb5_d02fcaab8ded3270abaa685bc6ecbc1d to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411238d577b2a587b4b7ebf35b85a4ac01fb5_d02fcaab8ded3270abaa685bc6ecbc1d 2024-11-23T15:26:57,821 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/A/dbcf26a8eea4486d8d3373e9cea7bb75, store: [table=TestAcidGuarantees family=A region=d02fcaab8ded3270abaa685bc6ecbc1d] 2024-11-23T15:26:57,822 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/A/dbcf26a8eea4486d8d3373e9cea7bb75 is 175, key is test_row_0/A:col10/1732375616793/Put/seqid=0 2024-11-23T15:26:57,825 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742352_1528 (size=31105) 2024-11-23T15:26:58,070 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:58,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51048 deadline: 1732375678068, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:58,226 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=170, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/A/dbcf26a8eea4486d8d3373e9cea7bb75 2024-11-23T15:26:58,232 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/B/6a1228eb2c584a55aca8fbf17af033a4 is 50, key is test_row_0/B:col10/1732375616793/Put/seqid=0 2024-11-23T15:26:58,236 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742353_1529 (size=12151) 2024-11-23T15:26:58,572 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:58,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51048 deadline: 1732375678571, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:58,636 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=170 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/B/6a1228eb2c584a55aca8fbf17af033a4 2024-11-23T15:26:58,643 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/C/0677011a7e0d4ddd94d7375bd15860b4 is 50, key is test_row_0/C:col10/1732375616793/Put/seqid=0 2024-11-23T15:26:58,647 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742354_1530 (size=12151) 2024-11-23T15:26:58,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=161 2024-11-23T15:26:58,771 INFO [Thread-2189 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 161 completed 2024-11-23T15:26:58,772 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-23T15:26:58,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] procedure2.ProcedureExecutor(1098): Stored pid=163, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=163, table=TestAcidGuarantees 2024-11-23T15:26:58,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-11-23T15:26:58,774 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=163, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=163, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-23T15:26:58,775 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=163, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=163, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-23T15:26:58,775 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=164, ppid=163, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 
2024-11-23T15:26:58,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-11-23T15:26:58,926 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:26:58,927 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=164 2024-11-23T15:26:58,927 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d. 2024-11-23T15:26:58,927 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d. as already flushing 2024-11-23T15:26:58,927 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d. 2024-11-23T15:26:58,927 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] handler.RSProcedureHandler(58): pid=164 java.io.IOException: Unable to complete flush {ENCODED => d02fcaab8ded3270abaa685bc6ecbc1d, NAME => 'TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:58,927 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=164 java.io.IOException: Unable to complete flush {ENCODED => d02fcaab8ded3270abaa685bc6ecbc1d, NAME => 'TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:26:58,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=164 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d02fcaab8ded3270abaa685bc6ecbc1d, NAME => 'TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d02fcaab8ded3270abaa685bc6ecbc1d, NAME => 'TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T15:26:59,047 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=170 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/C/0677011a7e0d4ddd94d7375bd15860b4 2024-11-23T15:26:59,051 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/A/dbcf26a8eea4486d8d3373e9cea7bb75 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/A/dbcf26a8eea4486d8d3373e9cea7bb75 2024-11-23T15:26:59,055 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/A/dbcf26a8eea4486d8d3373e9cea7bb75, entries=150, sequenceid=170, filesize=30.4 K 2024-11-23T15:26:59,055 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/B/6a1228eb2c584a55aca8fbf17af033a4 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/B/6a1228eb2c584a55aca8fbf17af033a4 2024-11-23T15:26:59,058 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/B/6a1228eb2c584a55aca8fbf17af033a4, entries=150, sequenceid=170, filesize=11.9 K 2024-11-23T15:26:59,059 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/C/0677011a7e0d4ddd94d7375bd15860b4 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/C/0677011a7e0d4ddd94d7375bd15860b4 2024-11-23T15:26:59,062 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/C/0677011a7e0d4ddd94d7375bd15860b4, entries=150, sequenceid=170, filesize=11.9 K 2024-11-23T15:26:59,063 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for d02fcaab8ded3270abaa685bc6ecbc1d in 1658ms, sequenceid=170, compaction requested=true 2024-11-23T15:26:59,063 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d02fcaab8ded3270abaa685bc6ecbc1d: 2024-11-23T15:26:59,063 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d02fcaab8ded3270abaa685bc6ecbc1d:A, priority=-2147483648, current under compaction store size is 1 2024-11-23T15:26:59,063 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T15:26:59,063 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-23T15:26:59,063 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d02fcaab8ded3270abaa685bc6ecbc1d:B, priority=-2147483648, current under compaction store size is 2 2024-11-23T15:26:59,063 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T15:26:59,063 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d02fcaab8ded3270abaa685bc6ecbc1d:C, priority=-2147483648, current under compaction store size is 3 2024-11-23T15:26:59,063 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T15:26:59,063 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-23T15:26:59,064 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 124410 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-23T15:26:59,064 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48594 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-23T15:26:59,064 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HStore(1540): d02fcaab8ded3270abaa685bc6ecbc1d/A is initiating minor compaction (all files) 2024-11-23T15:26:59,064 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1540): d02fcaab8ded3270abaa685bc6ecbc1d/B is initiating minor compaction (all files) 2024-11-23T15:26:59,064 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d02fcaab8ded3270abaa685bc6ecbc1d/A in TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d. 2024-11-23T15:26:59,064 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d02fcaab8ded3270abaa685bc6ecbc1d/B in TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d. 
2024-11-23T15:26:59,064 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/A/9b6458285d844c05b1cdc109ff9f9955, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/A/a90ac3a958fa46e896edff57e00108f7, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/A/ffabb2b2dfee40f4841db0faa3d6b6f3, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/A/dbcf26a8eea4486d8d3373e9cea7bb75] into tmpdir=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp, totalSize=121.5 K 2024-11-23T15:26:59,064 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/B/590e0c43d941418c8be6fbaa69cd8813, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/B/af57329485fe41c09cbf66f920caa2cc, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/B/a914d376348541e0b17e9f2c02fd5322, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/B/6a1228eb2c584a55aca8fbf17af033a4] into tmpdir=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp, totalSize=47.5 K 2024-11-23T15:26:59,064 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d. 2024-11-23T15:26:59,064 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d. 
files: [hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/A/9b6458285d844c05b1cdc109ff9f9955, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/A/a90ac3a958fa46e896edff57e00108f7, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/A/ffabb2b2dfee40f4841db0faa3d6b6f3, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/A/dbcf26a8eea4486d8d3373e9cea7bb75] 2024-11-23T15:26:59,064 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting 590e0c43d941418c8be6fbaa69cd8813, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1732375612835 2024-11-23T15:26:59,065 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9b6458285d844c05b1cdc109ff9f9955, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1732375612835 2024-11-23T15:26:59,065 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting af57329485fe41c09cbf66f920caa2cc, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=131, earliestPutTs=1732375613476 2024-11-23T15:26:59,065 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.Compactor(224): Compacting a90ac3a958fa46e896edff57e00108f7, keycount=150, bloomtype=ROW, size=30.3 K, encoding=NONE, compression=NONE, seqNum=131, earliestPutTs=1732375613476 2024-11-23T15:26:59,065 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting a914d376348541e0b17e9f2c02fd5322, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=157, earliestPutTs=1732375614644 2024-11-23T15:26:59,065 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.Compactor(224): Compacting ffabb2b2dfee40f4841db0faa3d6b6f3, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=157, earliestPutTs=1732375614644 2024-11-23T15:26:59,065 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting 6a1228eb2c584a55aca8fbf17af033a4, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=170, earliestPutTs=1732375616789 2024-11-23T15:26:59,065 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.Compactor(224): Compacting dbcf26a8eea4486d8d3373e9cea7bb75, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=170, earliestPutTs=1732375616789 2024-11-23T15:26:59,071 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=d02fcaab8ded3270abaa685bc6ecbc1d] 2024-11-23T15:26:59,072 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d02fcaab8ded3270abaa685bc6ecbc1d#B#compaction#449 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-23T15:26:59,073 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024112332381d54d9e641878dc38c21d844f07d_d02fcaab8ded3270abaa685bc6ecbc1d store=[table=TestAcidGuarantees family=A region=d02fcaab8ded3270abaa685bc6ecbc1d] 2024-11-23T15:26:59,073 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/B/6f34d7670cf54394a6808158cc92c446 is 50, key is test_row_0/B:col10/1732375616793/Put/seqid=0 2024-11-23T15:26:59,075 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024112332381d54d9e641878dc38c21d844f07d_d02fcaab8ded3270abaa685bc6ecbc1d, store=[table=TestAcidGuarantees family=A region=d02fcaab8ded3270abaa685bc6ecbc1d] 2024-11-23T15:26:59,075 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112332381d54d9e641878dc38c21d844f07d_d02fcaab8ded3270abaa685bc6ecbc1d because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=d02fcaab8ded3270abaa685bc6ecbc1d] 2024-11-23T15:26:59,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-11-23T15:26:59,079 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:26:59,080 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=164 2024-11-23T15:26:59,080 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d. 
2024-11-23T15:26:59,080 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2837): Flushing d02fcaab8ded3270abaa685bc6ecbc1d 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-23T15:26:59,080 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d02fcaab8ded3270abaa685bc6ecbc1d, store=A 2024-11-23T15:26:59,080 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:26:59,080 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d02fcaab8ded3270abaa685bc6ecbc1d, store=B 2024-11-23T15:26:59,080 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:26:59,080 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d02fcaab8ded3270abaa685bc6ecbc1d, store=C 2024-11-23T15:26:59,080 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:26:59,085 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742355_1531 (size=12527) 2024-11-23T15:26:59,094 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742356_1532 (size=4469) 2024-11-23T15:26:59,104 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411234f9bbbb123394774bba9e7daf4ad8f3e_d02fcaab8ded3270abaa685bc6ecbc1d is 50, key is test_row_0/A:col10/1732375617453/Put/seqid=0 2024-11-23T15:26:59,110 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742357_1533 (size=12304) 2024-11-23T15:26:59,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-11-23T15:26:59,490 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/B/6f34d7670cf54394a6808158cc92c446 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/B/6f34d7670cf54394a6808158cc92c446 2024-11-23T15:26:59,493 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in d02fcaab8ded3270abaa685bc6ecbc1d/B of d02fcaab8ded3270abaa685bc6ecbc1d into 6f34d7670cf54394a6808158cc92c446(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
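The mob.DefaultMobStoreCompactor entries above (a MOB writer created and then aborted because the compaction produced no MOB cells) and the flush writing its biggest-cell probe under mobdir/.tmp come from family A of TestAcidGuarantees being MOB-enabled. As a minimal sketch only, not this test's actual setup code, and assuming the HBase 2.x ColumnFamilyDescriptorBuilder API with an illustrative threshold, a MOB family is typically declared like this:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MobFamilySketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Cells in family A larger than the threshold are stored as separate MOB files,
          // which is what produces the mobdir/.tmp writer lines in the log above.
          ColumnFamilyDescriptor familyA = ColumnFamilyDescriptorBuilder
              .newBuilder(Bytes.toBytes("A"))
              .setMobEnabled(true)
              .setMobThreshold(100 * 1024L) // illustrative 100 KB threshold, not the test's value
              .build();
          admin.createTable(TableDescriptorBuilder
              .newBuilder(TableName.valueOf("TestAcidGuarantees"))
              .setColumnFamily(familyA)
              .build());
        }
      }
    }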
2024-11-23T15:26:59,493 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d02fcaab8ded3270abaa685bc6ecbc1d: 2024-11-23T15:26:59,493 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d., storeName=d02fcaab8ded3270abaa685bc6ecbc1d/B, priority=12, startTime=1732375619063; duration=0sec 2024-11-23T15:26:59,494 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T15:26:59,494 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d02fcaab8ded3270abaa685bc6ecbc1d:B 2024-11-23T15:26:59,494 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-23T15:26:59,495 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48594 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-23T15:26:59,495 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d02fcaab8ded3270abaa685bc6ecbc1d#A#compaction#450 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T15:26:59,495 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1540): d02fcaab8ded3270abaa685bc6ecbc1d/C is initiating minor compaction (all files) 2024-11-23T15:26:59,495 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d02fcaab8ded3270abaa685bc6ecbc1d/C in TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d. 
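The SortedCompactionPolicy/ExploringCompactionPolicy selection above ("4 files of size 48594 ... after considering 3 permutations") and the PressureAwareThroughputController's "total limit is 50.00 MB/second" are driven by region-server configuration rather than by the test itself. The sketch below only illustrates where such knobs usually live; the property names are the commonly documented HBase 2.x ones and should be verified against the version actually running, and the values are arbitrary:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionTuningSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Lower/upper bounds on how many store files a single compaction may select
        // (read by the compaction policy that logged the selection above).
        conf.setInt("hbase.hstore.compaction.min", 3);
        conf.setInt("hbase.hstore.compaction.max", 10);
        // The throughput controller's limit ("50.00 MB/second" in the log) is also
        // configurable via the hbase.hstore.compaction.throughput.* properties
        // (assumption: exact key names and defaults vary by release).
        System.out.println("compaction.min=" + conf.get("hbase.hstore.compaction.min"));
      }
    }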
2024-11-23T15:26:59,495 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/C/15c281dea3fd45e0bebed93c544c1827, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/C/f56e16dbe1904944a5749768dcdd1ea6, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/C/c9298c0ac12840edb9ad2ff6cc93b8a6, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/C/0677011a7e0d4ddd94d7375bd15860b4] into tmpdir=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp, totalSize=47.5 K 2024-11-23T15:26:59,496 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/A/2da65abb4c1b459fae00630180784311 is 175, key is test_row_0/A:col10/1732375616793/Put/seqid=0 2024-11-23T15:26:59,496 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting 15c281dea3fd45e0bebed93c544c1827, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1732375612835 2024-11-23T15:26:59,496 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting f56e16dbe1904944a5749768dcdd1ea6, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=131, earliestPutTs=1732375613476 2024-11-23T15:26:59,496 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting c9298c0ac12840edb9ad2ff6cc93b8a6, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=157, earliestPutTs=1732375614644 2024-11-23T15:26:59,497 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting 0677011a7e0d4ddd94d7375bd15860b4, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=170, earliestPutTs=1732375616789 2024-11-23T15:26:59,500 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742358_1534 (size=31481) 2024-11-23T15:26:59,504 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d02fcaab8ded3270abaa685bc6ecbc1d#C#compaction#452 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T15:26:59,504 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/C/842081bb845d43abbc06fa49632e3d97 is 50, key is test_row_0/C:col10/1732375616793/Put/seqid=0 2024-11-23T15:26:59,510 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:26:59,512 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742359_1535 (size=12527) 2024-11-23T15:26:59,515 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411234f9bbbb123394774bba9e7daf4ad8f3e_d02fcaab8ded3270abaa685bc6ecbc1d to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411234f9bbbb123394774bba9e7daf4ad8f3e_d02fcaab8ded3270abaa685bc6ecbc1d 2024-11-23T15:26:59,516 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/A/e02a75576eaf4a63b356d2436ed49778, store: [table=TestAcidGuarantees family=A region=d02fcaab8ded3270abaa685bc6ecbc1d] 2024-11-23T15:26:59,517 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/A/e02a75576eaf4a63b356d2436ed49778 is 175, key is test_row_0/A:col10/1732375617453/Put/seqid=0 2024-11-23T15:26:59,520 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742360_1536 (size=31105) 2024-11-23T15:26:59,520 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=193, memsize=44.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/A/e02a75576eaf4a63b356d2436ed49778 2024-11-23T15:26:59,526 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/B/2f2506283c994a4aa2e389f3f1d09b88 is 50, key is test_row_0/B:col10/1732375617453/Put/seqid=0 2024-11-23T15:26:59,529 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:33485 is added to blk_1073742361_1537 (size=12151) 2024-11-23T15:26:59,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(8581): Flush requested on d02fcaab8ded3270abaa685bc6ecbc1d 2024-11-23T15:26:59,578 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d. as already flushing 2024-11-23T15:26:59,604 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:59,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51048 deadline: 1732375679601, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:59,706 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:59,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51048 deadline: 1732375679705, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:59,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-11-23T15:26:59,903 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/A/2da65abb4c1b459fae00630180784311 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/A/2da65abb4c1b459fae00630180784311 2024-11-23T15:26:59,907 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in d02fcaab8ded3270abaa685bc6ecbc1d/A of d02fcaab8ded3270abaa685bc6ecbc1d into 2da65abb4c1b459fae00630180784311(size=30.7 K), total size for store is 30.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T15:26:59,908 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d02fcaab8ded3270abaa685bc6ecbc1d: 2024-11-23T15:26:59,908 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d., storeName=d02fcaab8ded3270abaa685bc6ecbc1d/A, priority=12, startTime=1732375619063; duration=0sec 2024-11-23T15:26:59,908 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T15:26:59,908 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d02fcaab8ded3270abaa685bc6ecbc1d:A 2024-11-23T15:26:59,909 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:26:59,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51048 deadline: 1732375679907, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:26:59,916 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/C/842081bb845d43abbc06fa49632e3d97 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/C/842081bb845d43abbc06fa49632e3d97 2024-11-23T15:26:59,920 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in d02fcaab8ded3270abaa685bc6ecbc1d/C of d02fcaab8ded3270abaa685bc6ecbc1d into 842081bb845d43abbc06fa49632e3d97(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
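The RegionTooBusyException warnings above ("Over memstore limit=512.0 K") mean writes to d02fcaab8ded3270abaa685bc6ecbc1d are rejected server-side until the in-flight flush shrinks the memstore below the blocking limit (normally the memstore flush size times the block multiplier; it is set very low in this test run). The HBase client treats the exception as retryable. A minimal client-side sketch, assuming the standard hbase.client.* properties, with illustrative values only:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class RetryBudgetSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Give the client more retry headroom while the server flushes and unblocks the region.
        conf.setInt("hbase.client.retries.number", 15); // illustrative
        conf.setLong("hbase.client.pause", 200L);       // ms between retry attempts, illustrative
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_0"));
          put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          // RegionTooBusyException is retried internally until the retry budget is exhausted.
          table.put(put);
        }
      }
    }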
2024-11-23T15:26:59,920 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d02fcaab8ded3270abaa685bc6ecbc1d: 2024-11-23T15:26:59,920 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d., storeName=d02fcaab8ded3270abaa685bc6ecbc1d/C, priority=12, startTime=1732375619063; duration=0sec 2024-11-23T15:26:59,920 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T15:26:59,920 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d02fcaab8ded3270abaa685bc6ecbc1d:C 2024-11-23T15:26:59,930 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=193 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/B/2f2506283c994a4aa2e389f3f1d09b88 2024-11-23T15:26:59,936 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/C/11579e633e2643c581b81305d31b9c6d is 50, key is test_row_0/C:col10/1732375617453/Put/seqid=0 2024-11-23T15:26:59,939 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742362_1538 (size=12151) 2024-11-23T15:27:00,211 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:27:00,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51048 deadline: 1732375680210, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:27:00,340 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=193 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/C/11579e633e2643c581b81305d31b9c6d 2024-11-23T15:27:00,343 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/A/e02a75576eaf4a63b356d2436ed49778 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/A/e02a75576eaf4a63b356d2436ed49778 2024-11-23T15:27:00,346 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/A/e02a75576eaf4a63b356d2436ed49778, entries=150, sequenceid=193, filesize=30.4 K 2024-11-23T15:27:00,347 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/B/2f2506283c994a4aa2e389f3f1d09b88 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/B/2f2506283c994a4aa2e389f3f1d09b88 2024-11-23T15:27:00,350 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/B/2f2506283c994a4aa2e389f3f1d09b88, entries=150, sequenceid=193, filesize=11.9 K 2024-11-23T15:27:00,351 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/C/11579e633e2643c581b81305d31b9c6d as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/C/11579e633e2643c581b81305d31b9c6d 2024-11-23T15:27:00,354 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/C/11579e633e2643c581b81305d31b9c6d, entries=150, sequenceid=193, filesize=11.9 K 2024-11-23T15:27:00,354 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for d02fcaab8ded3270abaa685bc6ecbc1d in 1274ms, sequenceid=193, compaction requested=false 2024-11-23T15:27:00,354 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2538): Flush status journal for d02fcaab8ded3270abaa685bc6ecbc1d: 2024-11-23T15:27:00,355 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d. 2024-11-23T15:27:00,355 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=164 2024-11-23T15:27:00,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4106): Remote procedure done, pid=164 2024-11-23T15:27:00,357 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=164, resume processing ppid=163 2024-11-23T15:27:00,357 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=164, ppid=163, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.5810 sec 2024-11-23T15:27:00,358 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=163, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=163, table=TestAcidGuarantees in 1.5860 sec 2024-11-23T15:27:00,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(8581): Flush requested on d02fcaab8ded3270abaa685bc6ecbc1d 2024-11-23T15:27:00,714 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d02fcaab8ded3270abaa685bc6ecbc1d 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-11-23T15:27:00,714 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d02fcaab8ded3270abaa685bc6ecbc1d, store=A 2024-11-23T15:27:00,714 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:27:00,714 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d02fcaab8ded3270abaa685bc6ecbc1d, store=B 2024-11-23T15:27:00,714 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:27:00,714 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
d02fcaab8ded3270abaa685bc6ecbc1d, store=C 2024-11-23T15:27:00,714 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:27:00,720 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112355add34463d148e790519c8623228ec5_d02fcaab8ded3270abaa685bc6ecbc1d is 50, key is test_row_0/A:col10/1732375619585/Put/seqid=0 2024-11-23T15:27:00,725 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742363_1539 (size=12304) 2024-11-23T15:27:00,725 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:27:00,728 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112355add34463d148e790519c8623228ec5_d02fcaab8ded3270abaa685bc6ecbc1d to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112355add34463d148e790519c8623228ec5_d02fcaab8ded3270abaa685bc6ecbc1d 2024-11-23T15:27:00,729 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/A/835b912cf6634022a0166b8bfcc4a748, store: [table=TestAcidGuarantees family=A region=d02fcaab8ded3270abaa685bc6ecbc1d] 2024-11-23T15:27:00,730 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/A/835b912cf6634022a0166b8bfcc4a748 is 175, key is test_row_0/A:col10/1732375619585/Put/seqid=0 2024-11-23T15:27:00,737 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742364_1540 (size=31105) 2024-11-23T15:27:00,762 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:27:00,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 200 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51048 deadline: 1732375680760, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:27:00,865 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:27:00,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 202 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51048 deadline: 1732375680863, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:27:00,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-11-23T15:27:00,878 INFO [Thread-2189 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 163 completed 2024-11-23T15:27:00,879 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-23T15:27:00,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] procedure2.ProcedureExecutor(1098): Stored pid=165, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=165, table=TestAcidGuarantees 2024-11-23T15:27:00,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-11-23T15:27:00,881 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=165, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=165, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-23T15:27:00,881 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=165, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=165, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-23T15:27:00,881 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=166, ppid=165, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-23T15:27:00,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-11-23T15:27:01,033 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:27:01,033 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=166 2024-11-23T15:27:01,033 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d. 
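Procedure pid=165 (with subprocedure pid=166) above was started by an explicit client flush request ("Client=jenkins//172.17.0.2 flush TestAcidGuarantees"), a repeat of the flush that just completed as procId 163. A minimal sketch of issuing that request through the Admin API; only the table name is taken from the log, the rest is illustrative:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Triggers the master-side flush procedure and waits for it to finish,
          // which is what produces the "Operation: FLUSH ... completed" line above.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }

When the region is already flushing, the dispatched FlushRegionCallable fails with the IOException seen below and the master re-dispatches it until the in-progress flush finishes.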
2024-11-23T15:27:01,033 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d. as already flushing 2024-11-23T15:27:01,033 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d. 2024-11-23T15:27:01,033 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] handler.RSProcedureHandler(58): pid=166 java.io.IOException: Unable to complete flush {ENCODED => d02fcaab8ded3270abaa685bc6ecbc1d, NAME => 'TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:27:01,033 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=166 java.io.IOException: Unable to complete flush {ENCODED => d02fcaab8ded3270abaa685bc6ecbc1d, NAME => 'TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T15:27:01,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=166 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d02fcaab8ded3270abaa685bc6ecbc1d, NAME => 'TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d02fcaab8ded3270abaa685bc6ecbc1d, NAME => 'TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:27:01,069 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:27:01,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 204 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51048 deadline: 1732375681067, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:27:01,137 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=210, memsize=24.6 K, hasBloomFilter=true, into tmp file hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/A/835b912cf6634022a0166b8bfcc4a748 2024-11-23T15:27:01,143 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/B/b7fb5d3b002c4c5c9abcce9c6fb99811 is 50, key is test_row_0/B:col10/1732375619585/Put/seqid=0 2024-11-23T15:27:01,148 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742365_1541 (size=12151) 2024-11-23T15:27:01,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-11-23T15:27:01,185 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:27:01,186 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=166 2024-11-23T15:27:01,186 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d. 2024-11-23T15:27:01,186 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d. as already flushing 2024-11-23T15:27:01,186 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d. 
2024-11-23T15:27:01,186 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] handler.RSProcedureHandler(58): pid=166 java.io.IOException: Unable to complete flush {ENCODED => d02fcaab8ded3270abaa685bc6ecbc1d, NAME => 'TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:27:01,186 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=166 java.io.IOException: Unable to complete flush {ENCODED => d02fcaab8ded3270abaa685bc6ecbc1d, NAME => 'TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:27:01,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=166 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d02fcaab8ded3270abaa685bc6ecbc1d, NAME => 'TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d02fcaab8ded3270abaa685bc6ecbc1d, NAME => 'TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:27:01,338 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:27:01,338 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=166 2024-11-23T15:27:01,339 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d. 2024-11-23T15:27:01,339 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d. as already flushing 2024-11-23T15:27:01,339 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d. 2024-11-23T15:27:01,339 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] handler.RSProcedureHandler(58): pid=166 java.io.IOException: Unable to complete flush {ENCODED => d02fcaab8ded3270abaa685bc6ecbc1d, NAME => 'TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:27:01,340 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=166 java.io.IOException: Unable to complete flush {ENCODED => d02fcaab8ded3270abaa685bc6ecbc1d, NAME => 'TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:27:01,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=166 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d02fcaab8ded3270abaa685bc6ecbc1d, NAME => 'TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d02fcaab8ded3270abaa685bc6ecbc1d, NAME => 'TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:27:01,373 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:27:01,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 206 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51048 deadline: 1732375681372, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:27:01,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-11-23T15:27:01,491 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:27:01,492 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=166 2024-11-23T15:27:01,492 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d. 2024-11-23T15:27:01,492 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d. as already flushing 2024-11-23T15:27:01,492 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d. 2024-11-23T15:27:01,492 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] handler.RSProcedureHandler(58): pid=166 java.io.IOException: Unable to complete flush {ENCODED => d02fcaab8ded3270abaa685bc6ecbc1d, NAME => 'TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T15:27:01,492 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=166 java.io.IOException: Unable to complete flush {ENCODED => d02fcaab8ded3270abaa685bc6ecbc1d, NAME => 'TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:27:01,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=166 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d02fcaab8ded3270abaa685bc6ecbc1d, NAME => 'TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d02fcaab8ded3270abaa685bc6ecbc1d, NAME => 'TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:27:01,549 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=210 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/B/b7fb5d3b002c4c5c9abcce9c6fb99811 2024-11-23T15:27:01,558 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/C/eaa12009c5e548ceae211406b56557df is 50, key is test_row_0/C:col10/1732375619585/Put/seqid=0 2024-11-23T15:27:01,561 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742366_1542 (size=12151) 2024-11-23T15:27:01,644 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:27:01,645 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=166 2024-11-23T15:27:01,645 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d. 2024-11-23T15:27:01,645 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d. as already flushing 2024-11-23T15:27:01,645 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d. 2024-11-23T15:27:01,645 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] handler.RSProcedureHandler(58): pid=166 java.io.IOException: Unable to complete flush {ENCODED => d02fcaab8ded3270abaa685bc6ecbc1d, NAME => 'TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T15:27:01,645 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=166 java.io.IOException: Unable to complete flush {ENCODED => d02fcaab8ded3270abaa685bc6ecbc1d, NAME => 'TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:27:01,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=166 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d02fcaab8ded3270abaa685bc6ecbc1d, NAME => 'TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d02fcaab8ded3270abaa685bc6ecbc1d, NAME => 'TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:27:01,797 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:27:01,797 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=166 2024-11-23T15:27:01,797 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d. 2024-11-23T15:27:01,797 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d. as already flushing 2024-11-23T15:27:01,798 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d. 2024-11-23T15:27:01,798 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] handler.RSProcedureHandler(58): pid=166 java.io.IOException: Unable to complete flush {ENCODED => d02fcaab8ded3270abaa685bc6ecbc1d, NAME => 'TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:27:01,798 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=166 java.io.IOException: Unable to complete flush {ENCODED => d02fcaab8ded3270abaa685bc6ecbc1d, NAME => 'TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:27:01,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=166 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d02fcaab8ded3270abaa685bc6ecbc1d, NAME => 'TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d02fcaab8ded3270abaa685bc6ecbc1d, NAME => 'TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:27:01,876 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:27:01,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 208 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51048 deadline: 1732375681874, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:27:01,949 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:27:01,950 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=166 2024-11-23T15:27:01,950 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d. 2024-11-23T15:27:01,950 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d. as already flushing 2024-11-23T15:27:01,950 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d. 2024-11-23T15:27:01,950 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] handler.RSProcedureHandler(58): pid=166 java.io.IOException: Unable to complete flush {ENCODED => d02fcaab8ded3270abaa685bc6ecbc1d, NAME => 'TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T15:27:01,950 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=166 java.io.IOException: Unable to complete flush {ENCODED => d02fcaab8ded3270abaa685bc6ecbc1d, NAME => 'TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:27:01,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=166 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d02fcaab8ded3270abaa685bc6ecbc1d, NAME => 'TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d02fcaab8ded3270abaa685bc6ecbc1d, NAME => 'TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:27:01,962 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=210 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/C/eaa12009c5e548ceae211406b56557df 2024-11-23T15:27:01,966 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/A/835b912cf6634022a0166b8bfcc4a748 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/A/835b912cf6634022a0166b8bfcc4a748 2024-11-23T15:27:01,969 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/A/835b912cf6634022a0166b8bfcc4a748, entries=150, sequenceid=210, filesize=30.4 K 2024-11-23T15:27:01,970 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/B/b7fb5d3b002c4c5c9abcce9c6fb99811 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/B/b7fb5d3b002c4c5c9abcce9c6fb99811 2024-11-23T15:27:01,973 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/B/b7fb5d3b002c4c5c9abcce9c6fb99811, entries=150, sequenceid=210, filesize=11.9 K 2024-11-23T15:27:01,973 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/C/eaa12009c5e548ceae211406b56557df as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/C/eaa12009c5e548ceae211406b56557df 2024-11-23T15:27:01,977 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/C/eaa12009c5e548ceae211406b56557df, entries=150, sequenceid=210, filesize=11.9 K 2024-11-23T15:27:01,977 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for d02fcaab8ded3270abaa685bc6ecbc1d in 1264ms, sequenceid=210, compaction requested=true 2024-11-23T15:27:01,977 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d02fcaab8ded3270abaa685bc6ecbc1d: 2024-11-23T15:27:01,977 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 
d02fcaab8ded3270abaa685bc6ecbc1d:A, priority=-2147483648, current under compaction store size is 1 2024-11-23T15:27:01,977 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T15:27:01,977 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d02fcaab8ded3270abaa685bc6ecbc1d:B, priority=-2147483648, current under compaction store size is 2 2024-11-23T15:27:01,977 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T15:27:01,978 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T15:27:01,978 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d02fcaab8ded3270abaa685bc6ecbc1d:C, priority=-2147483648, current under compaction store size is 3 2024-11-23T15:27:01,978 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T15:27:01,978 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T15:27:01,978 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36829 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T15:27:01,978 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 93691 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T15:27:01,978 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1540): d02fcaab8ded3270abaa685bc6ecbc1d/B is initiating minor compaction (all files) 2024-11-23T15:27:01,978 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HStore(1540): d02fcaab8ded3270abaa685bc6ecbc1d/A is initiating minor compaction (all files) 2024-11-23T15:27:01,978 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d02fcaab8ded3270abaa685bc6ecbc1d/A in TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d. 2024-11-23T15:27:01,978 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d02fcaab8ded3270abaa685bc6ecbc1d/B in TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d. 
2024-11-23T15:27:01,978 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/A/2da65abb4c1b459fae00630180784311, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/A/e02a75576eaf4a63b356d2436ed49778, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/A/835b912cf6634022a0166b8bfcc4a748] into tmpdir=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp, totalSize=91.5 K 2024-11-23T15:27:01,978 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/B/6f34d7670cf54394a6808158cc92c446, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/B/2f2506283c994a4aa2e389f3f1d09b88, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/B/b7fb5d3b002c4c5c9abcce9c6fb99811] into tmpdir=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp, totalSize=36.0 K 2024-11-23T15:27:01,978 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d. 2024-11-23T15:27:01,979 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d. 
files: [hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/A/2da65abb4c1b459fae00630180784311, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/A/e02a75576eaf4a63b356d2436ed49778, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/A/835b912cf6634022a0166b8bfcc4a748] 2024-11-23T15:27:01,979 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2da65abb4c1b459fae00630180784311, keycount=150, bloomtype=ROW, size=30.7 K, encoding=NONE, compression=NONE, seqNum=170, earliestPutTs=1732375616789 2024-11-23T15:27:01,979 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting 6f34d7670cf54394a6808158cc92c446, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=170, earliestPutTs=1732375616789 2024-11-23T15:27:01,979 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting 2f2506283c994a4aa2e389f3f1d09b88, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=193, earliestPutTs=1732375617453 2024-11-23T15:27:01,979 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.Compactor(224): Compacting e02a75576eaf4a63b356d2436ed49778, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=193, earliestPutTs=1732375617453 2024-11-23T15:27:01,980 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting b7fb5d3b002c4c5c9abcce9c6fb99811, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=210, earliestPutTs=1732375619585 2024-11-23T15:27:01,980 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 835b912cf6634022a0166b8bfcc4a748, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=210, earliestPutTs=1732375619585 2024-11-23T15:27:01,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-11-23T15:27:01,987 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=d02fcaab8ded3270abaa685bc6ecbc1d] 2024-11-23T15:27:01,989 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241123b6541c403c0847eba28aab1edeef9af1_d02fcaab8ded3270abaa685bc6ecbc1d store=[table=TestAcidGuarantees family=A region=d02fcaab8ded3270abaa685bc6ecbc1d] 2024-11-23T15:27:01,989 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d02fcaab8ded3270abaa685bc6ecbc1d#B#compaction#459 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-23T15:27:01,989 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/B/2a4ab2a9185e4b0f8b09cc40f819a4c0 is 50, key is test_row_0/B:col10/1732375619585/Put/seqid=0 2024-11-23T15:27:01,990 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241123b6541c403c0847eba28aab1edeef9af1_d02fcaab8ded3270abaa685bc6ecbc1d, store=[table=TestAcidGuarantees family=A region=d02fcaab8ded3270abaa685bc6ecbc1d] 2024-11-23T15:27:01,990 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241123b6541c403c0847eba28aab1edeef9af1_d02fcaab8ded3270abaa685bc6ecbc1d because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=d02fcaab8ded3270abaa685bc6ecbc1d] 2024-11-23T15:27:02,005 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742368_1544 (size=12629) 2024-11-23T15:27:02,009 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742367_1543 (size=4469) 2024-11-23T15:27:02,010 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d02fcaab8ded3270abaa685bc6ecbc1d#A#compaction#458 average throughput is 1.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T15:27:02,011 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/A/65644bd77b954ff08eaa0119fe6d02e7 is 175, key is test_row_0/A:col10/1732375619585/Put/seqid=0 2024-11-23T15:27:02,014 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742369_1545 (size=31583) 2024-11-23T15:27:02,020 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/A/65644bd77b954ff08eaa0119fe6d02e7 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/A/65644bd77b954ff08eaa0119fe6d02e7 2024-11-23T15:27:02,024 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d02fcaab8ded3270abaa685bc6ecbc1d/A of d02fcaab8ded3270abaa685bc6ecbc1d into 65644bd77b954ff08eaa0119fe6d02e7(size=30.8 K), total size for store is 30.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T15:27:02,024 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d02fcaab8ded3270abaa685bc6ecbc1d: 2024-11-23T15:27:02,024 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d., storeName=d02fcaab8ded3270abaa685bc6ecbc1d/A, priority=13, startTime=1732375621977; duration=0sec 2024-11-23T15:27:02,024 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T15:27:02,024 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d02fcaab8ded3270abaa685bc6ecbc1d:A 2024-11-23T15:27:02,024 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T15:27:02,025 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36829 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T15:27:02,025 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HStore(1540): d02fcaab8ded3270abaa685bc6ecbc1d/C is initiating minor compaction (all files) 2024-11-23T15:27:02,025 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d02fcaab8ded3270abaa685bc6ecbc1d/C in TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d. 2024-11-23T15:27:02,026 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/C/842081bb845d43abbc06fa49632e3d97, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/C/11579e633e2643c581b81305d31b9c6d, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/C/eaa12009c5e548ceae211406b56557df] into tmpdir=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp, totalSize=36.0 K 2024-11-23T15:27:02,026 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 842081bb845d43abbc06fa49632e3d97, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=170, earliestPutTs=1732375616789 2024-11-23T15:27:02,026 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 11579e633e2643c581b81305d31b9c6d, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=193, earliestPutTs=1732375617453 2024-11-23T15:27:02,026 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.Compactor(224): Compacting eaa12009c5e548ceae211406b56557df, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=210, earliestPutTs=1732375619585 2024-11-23T15:27:02,033 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] 
throttle.PressureAwareThroughputController(145): d02fcaab8ded3270abaa685bc6ecbc1d#C#compaction#460 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T15:27:02,034 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/C/d9dea370ea4d4c718c0201b64c08f0ea is 50, key is test_row_0/C:col10/1732375619585/Put/seqid=0 2024-11-23T15:27:02,039 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742370_1546 (size=12629) 2024-11-23T15:27:02,102 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:27:02,102 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=166 2024-11-23T15:27:02,103 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d. 2024-11-23T15:27:02,103 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2837): Flushing d02fcaab8ded3270abaa685bc6ecbc1d 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-11-23T15:27:02,103 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d02fcaab8ded3270abaa685bc6ecbc1d, store=A 2024-11-23T15:27:02,103 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:27:02,103 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d02fcaab8ded3270abaa685bc6ecbc1d, store=B 2024-11-23T15:27:02,103 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:27:02,103 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d02fcaab8ded3270abaa685bc6ecbc1d, store=C 2024-11-23T15:27:02,103 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:27:02,109 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411233225006cda194dfa8479f8200769ebc3_d02fcaab8ded3270abaa685bc6ecbc1d is 50, key is test_row_0/A:col10/1732375620756/Put/seqid=0 2024-11-23T15:27:02,112 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to 
blk_1073742371_1547 (size=12304) 2024-11-23T15:27:02,409 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/B/2a4ab2a9185e4b0f8b09cc40f819a4c0 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/B/2a4ab2a9185e4b0f8b09cc40f819a4c0 2024-11-23T15:27:02,413 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d02fcaab8ded3270abaa685bc6ecbc1d/B of d02fcaab8ded3270abaa685bc6ecbc1d into 2a4ab2a9185e4b0f8b09cc40f819a4c0(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T15:27:02,413 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d02fcaab8ded3270abaa685bc6ecbc1d: 2024-11-23T15:27:02,413 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d., storeName=d02fcaab8ded3270abaa685bc6ecbc1d/B, priority=13, startTime=1732375621977; duration=0sec 2024-11-23T15:27:02,413 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T15:27:02,413 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d02fcaab8ded3270abaa685bc6ecbc1d:B 2024-11-23T15:27:02,442 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/C/d9dea370ea4d4c718c0201b64c08f0ea as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/C/d9dea370ea4d4c718c0201b64c08f0ea 2024-11-23T15:27:02,446 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d02fcaab8ded3270abaa685bc6ecbc1d/C of d02fcaab8ded3270abaa685bc6ecbc1d into d9dea370ea4d4c718c0201b64c08f0ea(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T15:27:02,447 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d02fcaab8ded3270abaa685bc6ecbc1d: 2024-11-23T15:27:02,447 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d., storeName=d02fcaab8ded3270abaa685bc6ecbc1d/C, priority=13, startTime=1732375621978; duration=0sec 2024-11-23T15:27:02,447 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T15:27:02,447 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d02fcaab8ded3270abaa685bc6ecbc1d:C 2024-11-23T15:27:02,513 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:27:02,516 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411233225006cda194dfa8479f8200769ebc3_d02fcaab8ded3270abaa685bc6ecbc1d to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411233225006cda194dfa8479f8200769ebc3_d02fcaab8ded3270abaa685bc6ecbc1d 2024-11-23T15:27:02,517 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/A/e4ec56632c11465fa220f8f87fa418df, store: [table=TestAcidGuarantees family=A region=d02fcaab8ded3270abaa685bc6ecbc1d] 2024-11-23T15:27:02,517 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/A/e4ec56632c11465fa220f8f87fa418df is 175, key is test_row_0/A:col10/1732375620756/Put/seqid=0 2024-11-23T15:27:02,521 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742372_1548 (size=31105) 2024-11-23T15:27:02,888 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d. 
as already flushing 2024-11-23T15:27:02,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(8581): Flush requested on d02fcaab8ded3270abaa685bc6ecbc1d 2024-11-23T15:27:02,921 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=233, memsize=42.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/A/e4ec56632c11465fa220f8f87fa418df 2024-11-23T15:27:02,922 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:27:02,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 221 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51048 deadline: 1732375682921, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:27:02,928 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/B/f8e7374903074749aca7485da6b72547 is 50, key is test_row_0/B:col10/1732375620756/Put/seqid=0 2024-11-23T15:27:02,932 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742373_1549 (size=12151) 2024-11-23T15:27:02,933 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=233 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/B/f8e7374903074749aca7485da6b72547 
2024-11-23T15:27:02,939 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/C/bbb48989f0864badb8d9a87d6fd5ff85 is 50, key is test_row_0/C:col10/1732375620756/Put/seqid=0 2024-11-23T15:27:02,942 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742374_1550 (size=12151) 2024-11-23T15:27:02,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-11-23T15:27:03,024 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:27:03,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 223 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51048 deadline: 1732375683023, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:27:03,227 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:27:03,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 225 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51048 deadline: 1732375683226, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:27:03,343 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=233 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/C/bbb48989f0864badb8d9a87d6fd5ff85 2024-11-23T15:27:03,347 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/A/e4ec56632c11465fa220f8f87fa418df as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/A/e4ec56632c11465fa220f8f87fa418df 2024-11-23T15:27:03,350 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/A/e4ec56632c11465fa220f8f87fa418df, entries=150, sequenceid=233, filesize=30.4 K 2024-11-23T15:27:03,350 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/B/f8e7374903074749aca7485da6b72547 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/B/f8e7374903074749aca7485da6b72547 2024-11-23T15:27:03,354 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/B/f8e7374903074749aca7485da6b72547, entries=150, sequenceid=233, filesize=11.9 K 2024-11-23T15:27:03,354 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/C/bbb48989f0864badb8d9a87d6fd5ff85 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/C/bbb48989f0864badb8d9a87d6fd5ff85 2024-11-23T15:27:03,357 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/C/bbb48989f0864badb8d9a87d6fd5ff85, entries=150, sequenceid=233, filesize=11.9 K 2024-11-23T15:27:03,358 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=73.80 KB/75570 for d02fcaab8ded3270abaa685bc6ecbc1d in 1255ms, sequenceid=233, compaction requested=false 2024-11-23T15:27:03,358 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2538): Flush status journal for d02fcaab8ded3270abaa685bc6ecbc1d: 2024-11-23T15:27:03,358 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d. 2024-11-23T15:27:03,358 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=166 2024-11-23T15:27:03,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4106): Remote procedure done, pid=166 2024-11-23T15:27:03,360 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=166, resume processing ppid=165 2024-11-23T15:27:03,361 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=166, ppid=165, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.4780 sec 2024-11-23T15:27:03,362 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=165, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=165, table=TestAcidGuarantees in 2.4820 sec 2024-11-23T15:27:03,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(8581): Flush requested on d02fcaab8ded3270abaa685bc6ecbc1d 2024-11-23T15:27:03,532 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d02fcaab8ded3270abaa685bc6ecbc1d 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-11-23T15:27:03,533 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d02fcaab8ded3270abaa685bc6ecbc1d, store=A 2024-11-23T15:27:03,533 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:27:03,533 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d02fcaab8ded3270abaa685bc6ecbc1d, store=B 2024-11-23T15:27:03,533 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:27:03,533 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
d02fcaab8ded3270abaa685bc6ecbc1d, store=C 2024-11-23T15:27:03,533 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:27:03,539 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411230f4e08d3372c431681038c0c7cb946cf_d02fcaab8ded3270abaa685bc6ecbc1d is 50, key is test_row_0/A:col10/1732375623531/Put/seqid=0 2024-11-23T15:27:03,542 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742375_1551 (size=14794) 2024-11-23T15:27:03,581 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:27:03,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 246 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51048 deadline: 1732375683580, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:27:03,684 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:27:03,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 248 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51048 deadline: 1732375683682, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:27:03,887 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:27:03,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 250 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51048 deadline: 1732375683886, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:27:03,943 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:27:03,946 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411230f4e08d3372c431681038c0c7cb946cf_d02fcaab8ded3270abaa685bc6ecbc1d to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411230f4e08d3372c431681038c0c7cb946cf_d02fcaab8ded3270abaa685bc6ecbc1d 2024-11-23T15:27:03,947 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/A/00b595a827964b71a8e109e920ea7336, store: [table=TestAcidGuarantees family=A region=d02fcaab8ded3270abaa685bc6ecbc1d] 2024-11-23T15:27:03,948 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/A/00b595a827964b71a8e109e920ea7336 is 175, key is test_row_0/A:col10/1732375623531/Put/seqid=0 2024-11-23T15:27:03,951 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742376_1552 (size=39749) 2024-11-23T15:27:04,190 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:27:04,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 252 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51048 deadline: 1732375684189, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:27:04,352 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=250, memsize=26.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/A/00b595a827964b71a8e109e920ea7336 2024-11-23T15:27:04,358 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/B/bf44a4d4ce174e959ab3c0f0cd154082 is 50, key is test_row_0/B:col10/1732375623531/Put/seqid=0 2024-11-23T15:27:04,362 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742377_1553 (size=12151) 2024-11-23T15:27:04,692 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:27:04,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 254 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51048 deadline: 1732375684691, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:27:04,762 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=250 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/B/bf44a4d4ce174e959ab3c0f0cd154082 2024-11-23T15:27:04,768 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/C/cb548bc59dac4760a310297047a990a0 is 50, key is test_row_0/C:col10/1732375623531/Put/seqid=0 2024-11-23T15:27:04,771 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742378_1554 (size=12151) 2024-11-23T15:27:04,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-11-23T15:27:04,985 INFO [Thread-2189 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 165 completed 2024-11-23T15:27:04,986 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-23T15:27:04,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] procedure2.ProcedureExecutor(1098): Stored pid=167, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=167, table=TestAcidGuarantees 2024-11-23T15:27:04,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-11-23T15:27:04,987 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=167, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=167, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-23T15:27:04,988 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=167, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=167, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-23T15:27:04,988 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=168, ppid=167, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 
2024-11-23T15:27:05,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-11-23T15:27:05,140 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:27:05,140 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=168 2024-11-23T15:27:05,140 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d. 2024-11-23T15:27:05,140 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d. as already flushing 2024-11-23T15:27:05,141 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d. 2024-11-23T15:27:05,141 ERROR [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] handler.RSProcedureHandler(58): pid=168 java.io.IOException: Unable to complete flush {ENCODED => d02fcaab8ded3270abaa685bc6ecbc1d, NAME => 'TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:27:05,141 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=168 java.io.IOException: Unable to complete flush {ENCODED => d02fcaab8ded3270abaa685bc6ecbc1d, NAME => 'TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T15:27:05,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4114): Remote procedure failed, pid=168 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d02fcaab8ded3270abaa685bc6ecbc1d, NAME => 'TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d02fcaab8ded3270abaa685bc6ecbc1d, NAME => 'TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T15:27:05,172 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=250 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/C/cb548bc59dac4760a310297047a990a0 2024-11-23T15:27:05,175 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/A/00b595a827964b71a8e109e920ea7336 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/A/00b595a827964b71a8e109e920ea7336 2024-11-23T15:27:05,178 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/A/00b595a827964b71a8e109e920ea7336, entries=200, sequenceid=250, filesize=38.8 K 2024-11-23T15:27:05,179 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/B/bf44a4d4ce174e959ab3c0f0cd154082 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/B/bf44a4d4ce174e959ab3c0f0cd154082 2024-11-23T15:27:05,182 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/B/bf44a4d4ce174e959ab3c0f0cd154082, entries=150, sequenceid=250, filesize=11.9 K 2024-11-23T15:27:05,182 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/C/cb548bc59dac4760a310297047a990a0 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/C/cb548bc59dac4760a310297047a990a0 2024-11-23T15:27:05,185 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/C/cb548bc59dac4760a310297047a990a0, entries=150, sequenceid=250, filesize=11.9 K 2024-11-23T15:27:05,186 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for d02fcaab8ded3270abaa685bc6ecbc1d in 1654ms, sequenceid=250, compaction requested=true 2024-11-23T15:27:05,186 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d02fcaab8ded3270abaa685bc6ecbc1d: 2024-11-23T15:27:05,186 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d02fcaab8ded3270abaa685bc6ecbc1d:A, priority=-2147483648, current under compaction store size is 1 2024-11-23T15:27:05,186 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T15:27:05,186 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T15:27:05,186 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d02fcaab8ded3270abaa685bc6ecbc1d:B, priority=-2147483648, current under compaction store size is 2 2024-11-23T15:27:05,186 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T15:27:05,186 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T15:27:05,186 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d02fcaab8ded3270abaa685bc6ecbc1d:C, priority=-2147483648, current under compaction store size is 3 2024-11-23T15:27:05,186 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T15:27:05,187 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36931 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T15:27:05,187 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 102437 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T15:27:05,187 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1540): d02fcaab8ded3270abaa685bc6ecbc1d/B is initiating minor compaction (all files) 2024-11-23T15:27:05,187 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HStore(1540): d02fcaab8ded3270abaa685bc6ecbc1d/A is initiating minor compaction (all files) 2024-11-23T15:27:05,187 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d02fcaab8ded3270abaa685bc6ecbc1d/B in TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d. 2024-11-23T15:27:05,187 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d02fcaab8ded3270abaa685bc6ecbc1d/A in TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d. 
2024-11-23T15:27:05,188 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/B/2a4ab2a9185e4b0f8b09cc40f819a4c0, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/B/f8e7374903074749aca7485da6b72547, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/B/bf44a4d4ce174e959ab3c0f0cd154082] into tmpdir=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp, totalSize=36.1 K 2024-11-23T15:27:05,188 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/A/65644bd77b954ff08eaa0119fe6d02e7, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/A/e4ec56632c11465fa220f8f87fa418df, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/A/00b595a827964b71a8e109e920ea7336] into tmpdir=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp, totalSize=100.0 K 2024-11-23T15:27:05,188 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d. 2024-11-23T15:27:05,188 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d. 
files: [hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/A/65644bd77b954ff08eaa0119fe6d02e7, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/A/e4ec56632c11465fa220f8f87fa418df, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/A/00b595a827964b71a8e109e920ea7336] 2024-11-23T15:27:05,188 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting 2a4ab2a9185e4b0f8b09cc40f819a4c0, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=210, earliestPutTs=1732375619585 2024-11-23T15:27:05,188 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 65644bd77b954ff08eaa0119fe6d02e7, keycount=150, bloomtype=ROW, size=30.8 K, encoding=NONE, compression=NONE, seqNum=210, earliestPutTs=1732375619585 2024-11-23T15:27:05,188 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting f8e7374903074749aca7485da6b72547, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=233, earliestPutTs=1732375620749 2024-11-23T15:27:05,188 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.Compactor(224): Compacting e4ec56632c11465fa220f8f87fa418df, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=233, earliestPutTs=1732375620749 2024-11-23T15:27:05,189 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] compactions.Compactor(224): Compacting bf44a4d4ce174e959ab3c0f0cd154082, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=250, earliestPutTs=1732375622900 2024-11-23T15:27:05,189 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.Compactor(224): Compacting 00b595a827964b71a8e109e920ea7336, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=250, earliestPutTs=1732375622900 2024-11-23T15:27:05,194 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=d02fcaab8ded3270abaa685bc6ecbc1d] 2024-11-23T15:27:05,195 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d02fcaab8ded3270abaa685bc6ecbc1d#B#compaction#467 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-23T15:27:05,195 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/B/ec543413787948078a4ab1d57672f31d is 50, key is test_row_0/B:col10/1732375623531/Put/seqid=0 2024-11-23T15:27:05,196 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241123d0a5d1e44ddd472fa5253eda7dbea699_d02fcaab8ded3270abaa685bc6ecbc1d store=[table=TestAcidGuarantees family=A region=d02fcaab8ded3270abaa685bc6ecbc1d] 2024-11-23T15:27:05,198 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241123d0a5d1e44ddd472fa5253eda7dbea699_d02fcaab8ded3270abaa685bc6ecbc1d, store=[table=TestAcidGuarantees family=A region=d02fcaab8ded3270abaa685bc6ecbc1d] 2024-11-23T15:27:05,198 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241123d0a5d1e44ddd472fa5253eda7dbea699_d02fcaab8ded3270abaa685bc6ecbc1d because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=d02fcaab8ded3270abaa685bc6ecbc1d] 2024-11-23T15:27:05,200 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742379_1555 (size=12731) 2024-11-23T15:27:05,202 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742380_1556 (size=4469) 2024-11-23T15:27:05,203 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d02fcaab8ded3270abaa685bc6ecbc1d#A#compaction#468 average throughput is 2.71 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T15:27:05,203 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/A/934e362ecc4e4c1188cd156d65424cc0 is 175, key is test_row_0/A:col10/1732375623531/Put/seqid=0 2024-11-23T15:27:05,206 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742381_1557 (size=31685) 2024-11-23T15:27:05,210 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/A/934e362ecc4e4c1188cd156d65424cc0 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/A/934e362ecc4e4c1188cd156d65424cc0 2024-11-23T15:27:05,213 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d02fcaab8ded3270abaa685bc6ecbc1d/A of d02fcaab8ded3270abaa685bc6ecbc1d into 934e362ecc4e4c1188cd156d65424cc0(size=30.9 K), total size for store is 30.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T15:27:05,213 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d02fcaab8ded3270abaa685bc6ecbc1d: 2024-11-23T15:27:05,213 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d., storeName=d02fcaab8ded3270abaa685bc6ecbc1d/A, priority=13, startTime=1732375625186; duration=0sec 2024-11-23T15:27:05,213 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T15:27:05,213 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d02fcaab8ded3270abaa685bc6ecbc1d:A 2024-11-23T15:27:05,213 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T15:27:05,214 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36931 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T15:27:05,214 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HStore(1540): d02fcaab8ded3270abaa685bc6ecbc1d/C is initiating minor compaction (all files) 2024-11-23T15:27:05,214 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d02fcaab8ded3270abaa685bc6ecbc1d/C in TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d. 
2024-11-23T15:27:05,214 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/C/d9dea370ea4d4c718c0201b64c08f0ea, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/C/bbb48989f0864badb8d9a87d6fd5ff85, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/C/cb548bc59dac4760a310297047a990a0] into tmpdir=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp, totalSize=36.1 K 2024-11-23T15:27:05,215 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.Compactor(224): Compacting d9dea370ea4d4c718c0201b64c08f0ea, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=210, earliestPutTs=1732375619585 2024-11-23T15:27:05,215 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.Compactor(224): Compacting bbb48989f0864badb8d9a87d6fd5ff85, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=233, earliestPutTs=1732375620749 2024-11-23T15:27:05,215 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] compactions.Compactor(224): Compacting cb548bc59dac4760a310297047a990a0, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=250, earliestPutTs=1732375622900 2024-11-23T15:27:05,220 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d02fcaab8ded3270abaa685bc6ecbc1d#C#compaction#469 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T15:27:05,221 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/C/9328dfdeaab84442b9874165f69479c5 is 50, key is test_row_0/C:col10/1732375623531/Put/seqid=0 2024-11-23T15:27:05,224 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742382_1558 (size=12731) 2024-11-23T15:27:05,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-11-23T15:27:05,293 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:27:05,293 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33811 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=168 2024-11-23T15:27:05,293 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d. 
2024-11-23T15:27:05,293 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2837): Flushing d02fcaab8ded3270abaa685bc6ecbc1d 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-11-23T15:27:05,294 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d02fcaab8ded3270abaa685bc6ecbc1d, store=A 2024-11-23T15:27:05,294 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:27:05,294 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d02fcaab8ded3270abaa685bc6ecbc1d, store=B 2024-11-23T15:27:05,294 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:27:05,294 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d02fcaab8ded3270abaa685bc6ecbc1d, store=C 2024-11-23T15:27:05,294 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:27:05,299 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411237efc3ffcb7034bc291a484ffad0fa593_d02fcaab8ded3270abaa685bc6ecbc1d is 50, key is test_row_0/A:col10/1732375623577/Put/seqid=0 2024-11-23T15:27:05,303 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742383_1559 (size=12454) 2024-11-23T15:27:05,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-11-23T15:27:05,605 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/B/ec543413787948078a4ab1d57672f31d as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/B/ec543413787948078a4ab1d57672f31d 2024-11-23T15:27:05,609 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d02fcaab8ded3270abaa685bc6ecbc1d/B of d02fcaab8ded3270abaa685bc6ecbc1d into ec543413787948078a4ab1d57672f31d(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T15:27:05,609 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d02fcaab8ded3270abaa685bc6ecbc1d: 2024-11-23T15:27:05,609 INFO [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d., storeName=d02fcaab8ded3270abaa685bc6ecbc1d/B, priority=13, startTime=1732375625186; duration=0sec 2024-11-23T15:27:05,609 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T15:27:05,609 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d02fcaab8ded3270abaa685bc6ecbc1d:B 2024-11-23T15:27:05,628 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/C/9328dfdeaab84442b9874165f69479c5 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/C/9328dfdeaab84442b9874165f69479c5 2024-11-23T15:27:05,632 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d02fcaab8ded3270abaa685bc6ecbc1d/C of d02fcaab8ded3270abaa685bc6ecbc1d into 9328dfdeaab84442b9874165f69479c5(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T15:27:05,632 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d02fcaab8ded3270abaa685bc6ecbc1d: 2024-11-23T15:27:05,632 INFO [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d., storeName=d02fcaab8ded3270abaa685bc6ecbc1d/C, priority=13, startTime=1732375625186; duration=0sec 2024-11-23T15:27:05,632 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T15:27:05,632 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d02fcaab8ded3270abaa685bc6ecbc1d:C 2024-11-23T15:27:05,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(8581): Flush requested on d02fcaab8ded3270abaa685bc6ecbc1d 2024-11-23T15:27:05,698 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d. 
as already flushing 2024-11-23T15:27:05,703 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:27:05,707 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411237efc3ffcb7034bc291a484ffad0fa593_d02fcaab8ded3270abaa685bc6ecbc1d to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411237efc3ffcb7034bc291a484ffad0fa593_d02fcaab8ded3270abaa685bc6ecbc1d 2024-11-23T15:27:05,708 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/A/fc02d474846746df81c3a58a666bb587, store: [table=TestAcidGuarantees family=A region=d02fcaab8ded3270abaa685bc6ecbc1d] 2024-11-23T15:27:05,708 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/A/fc02d474846746df81c3a58a666bb587 is 175, key is test_row_0/A:col10/1732375623577/Put/seqid=0 2024-11-23T15:27:05,712 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742384_1560 (size=31255) 2024-11-23T15:27:05,720 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:27:05,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 268 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51048 deadline: 1732375685719, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:27:05,822 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:27:05,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 270 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51048 deadline: 1732375685821, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:27:06,025 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:27:06,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 272 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51048 deadline: 1732375686025, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:27:06,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-11-23T15:27:06,112 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=272, memsize=40.3 K, hasBloomFilter=true, into tmp file hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/A/fc02d474846746df81c3a58a666bb587 2024-11-23T15:27:06,118 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/B/5cba13d1c35a4710856ae7575a24bcbe is 50, key is test_row_0/B:col10/1732375623577/Put/seqid=0 2024-11-23T15:27:06,121 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742385_1561 (size=12301) 2024-11-23T15:27:06,121 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=272 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/B/5cba13d1c35a4710856ae7575a24bcbe 2024-11-23T15:27:06,127 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/C/3440b3a26e374830b445645d6492f483 is 50, key is test_row_0/C:col10/1732375623577/Put/seqid=0 2024-11-23T15:27:06,130 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to 
blk_1073742386_1562 (size=12301) 2024-11-23T15:27:06,327 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T15:27:06,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33811 {}] ipc.CallRunner(138): callId: 274 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51048 deadline: 1732375686327, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 2024-11-23T15:27:06,476 DEBUG [Thread-2192 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x150e08ed to 127.0.0.1:62881 2024-11-23T15:27:06,476 DEBUG [Thread-2190 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3f6a59e4 to 127.0.0.1:62881 2024-11-23T15:27:06,476 DEBUG [Thread-2192 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T15:27:06,476 DEBUG [Thread-2190 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T15:27:06,477 DEBUG [Thread-2198 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x68c2838a to 127.0.0.1:62881 2024-11-23T15:27:06,477 DEBUG [Thread-2198 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T15:27:06,477 DEBUG [Thread-2196 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5cfdf76c to 127.0.0.1:62881 2024-11-23T15:27:06,477 DEBUG [Thread-2196 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T15:27:06,478 DEBUG [Thread-2194 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3a3b66d3 to 127.0.0.1:62881 2024-11-23T15:27:06,478 DEBUG [Thread-2194 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T15:27:06,531 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=272 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/C/3440b3a26e374830b445645d6492f483 2024-11-23T15:27:06,535 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/A/fc02d474846746df81c3a58a666bb587 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/A/fc02d474846746df81c3a58a666bb587 2024-11-23T15:27:06,537 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/A/fc02d474846746df81c3a58a666bb587, entries=150, sequenceid=272, filesize=30.5 K 2024-11-23T15:27:06,538 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/B/5cba13d1c35a4710856ae7575a24bcbe as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/B/5cba13d1c35a4710856ae7575a24bcbe 2024-11-23T15:27:06,541 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/B/5cba13d1c35a4710856ae7575a24bcbe, entries=150, sequenceid=272, filesize=12.0 K 2024-11-23T15:27:06,541 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/C/3440b3a26e374830b445645d6492f483 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/C/3440b3a26e374830b445645d6492f483 2024-11-23T15:27:06,543 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/C/3440b3a26e374830b445645d6492f483, entries=150, sequenceid=272, filesize=12.0 K 2024-11-23T15:27:06,544 INFO [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=80.51 KB/82440 for d02fcaab8ded3270abaa685bc6ecbc1d in 1251ms, sequenceid=272, compaction requested=false 2024-11-23T15:27:06,544 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2538): Flush status journal for d02fcaab8ded3270abaa685bc6ecbc1d: 2024-11-23T15:27:06,544 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(64): Closing region operation on 
TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d. 2024-11-23T15:27:06,544 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6a36843bf905:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=168 2024-11-23T15:27:06,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster(4106): Remote procedure done, pid=168 2024-11-23T15:27:06,546 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=168, resume processing ppid=167 2024-11-23T15:27:06,546 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=168, ppid=167, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.5570 sec 2024-11-23T15:27:06,547 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=167, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=167, table=TestAcidGuarantees in 1.5600 sec 2024-11-23T15:27:06,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33811 {}] regionserver.HRegion(8581): Flush requested on d02fcaab8ded3270abaa685bc6ecbc1d 2024-11-23T15:27:06,833 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d02fcaab8ded3270abaa685bc6ecbc1d 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-11-23T15:27:06,833 DEBUG [Thread-2179 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2ac53e79 to 127.0.0.1:62881 2024-11-23T15:27:06,833 DEBUG [Thread-2179 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T15:27:06,833 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d02fcaab8ded3270abaa685bc6ecbc1d, store=A 2024-11-23T15:27:06,833 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:27:06,833 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d02fcaab8ded3270abaa685bc6ecbc1d, store=B 2024-11-23T15:27:06,833 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:27:06,833 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d02fcaab8ded3270abaa685bc6ecbc1d, store=C 2024-11-23T15:27:06,834 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:27:06,838 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112343d1db7ccb47467d9c9f61a964bdd39b_d02fcaab8ded3270abaa685bc6ecbc1d is 50, key is test_row_0/A:col10/1732375625702/Put/seqid=0 2024-11-23T15:27:06,841 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742387_1563 (size=12454) 2024-11-23T15:27:06,920 DEBUG [Thread-2185 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x11030ef5 to 127.0.0.1:62881 2024-11-23T15:27:06,920 DEBUG [Thread-2185 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T15:27:06,933 DEBUG [Thread-2183 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7181df3b to 127.0.0.1:62881 2024-11-23T15:27:06,934 DEBUG [Thread-2183 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T15:27:06,938 DEBUG [Thread-2187 {}] zookeeper.ReadOnlyZKClient(407): Close 
zookeeper connection 0x69abefea to 127.0.0.1:62881 2024-11-23T15:27:06,938 DEBUG [Thread-2187 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T15:27:06,942 DEBUG [Thread-2181 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x05bc9c3e to 127.0.0.1:62881 2024-11-23T15:27:06,942 DEBUG [Thread-2181 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T15:27:07,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-11-23T15:27:07,091 INFO [Thread-2189 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 167 completed 2024-11-23T15:27:07,091 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. Writers: 2024-11-23T15:27:07,091 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 171 2024-11-23T15:27:07,091 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 17 2024-11-23T15:27:07,091 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 10 2024-11-23T15:27:07,091 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 13 2024-11-23T15:27:07,091 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 19 2024-11-23T15:27:07,091 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-11-23T15:27:07,092 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 7650 2024-11-23T15:27:07,092 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 7453 2024-11-23T15:27:07,092 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 7448 2024-11-23T15:27:07,092 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 7667 2024-11-23T15:27:07,092 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 7443 2024-11-23T15:27:07,092 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-11-23T15:27:07,092 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-11-23T15:27:07,092 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5765d46a to 127.0.0.1:62881 2024-11-23T15:27:07,092 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T15:27:07,092 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-11-23T15:27:07,092 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-11-23T15:27:07,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] procedure2.ProcedureExecutor(1098): Stored pid=169, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-11-23T15:27:07,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169 2024-11-23T15:27:07,095 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732375627095"}]},"ts":"1732375627095"} 2024-11-23T15:27:07,095 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-11-23T15:27:07,097 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 
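[Editor's note] The RegionTooBusyException warnings earlier in this log come from HRegion.checkResources while the region's memstore is over its 512 K blocking limit; writers are expected to back off until the in-flight flush completes. The standard HBase client normally retries such calls internally, but as a hedged sketch of what explicit handling could look like, a caller might do something along these lines (the retry count and sleep values are arbitrary assumptions, not values used by the test tool):

// Illustrative retry-with-backoff on RegionTooBusyException; not part of
// AcidGuaranteesTestTool. Assumes an HBase 2.x client on the classpath.
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;

public class BackoffPut {
  static void putWithBackoff(Table table, Put put) throws Exception {
    int attempts = 0;
    while (true) {
      try {
        table.put(put);
        return;
      } catch (RegionTooBusyException busy) {
        if (++attempts >= 5) {
          throw busy; // give up after a few attempts
        }
        // Exponential backoff while the region flushes its memstores.
        Thread.sleep(100L * (1L << attempts));
      }
    }
  }
}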
2024-11-23T15:27:07,098 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=170, ppid=169, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-23T15:27:07,099 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=171, ppid=170, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=d02fcaab8ded3270abaa685bc6ecbc1d, UNASSIGN}] 2024-11-23T15:27:07,099 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=171, ppid=170, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=d02fcaab8ded3270abaa685bc6ecbc1d, UNASSIGN 2024-11-23T15:27:07,100 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=171 updating hbase:meta row=d02fcaab8ded3270abaa685bc6ecbc1d, regionState=CLOSING, regionLocation=6a36843bf905,33811,1732375456985 2024-11-23T15:27:07,100 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-23T15:27:07,100 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=172, ppid=171, state=RUNNABLE; CloseRegionProcedure d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985}] 2024-11-23T15:27:07,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169 2024-11-23T15:27:07,242 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:27:07,245 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112343d1db7ccb47467d9c9f61a964bdd39b_d02fcaab8ded3270abaa685bc6ecbc1d to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112343d1db7ccb47467d9c9f61a964bdd39b_d02fcaab8ded3270abaa685bc6ecbc1d 2024-11-23T15:27:07,245 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/A/b3dc789169b547c4830a4c387dc6ac1b, store: [table=TestAcidGuarantees family=A region=d02fcaab8ded3270abaa685bc6ecbc1d] 2024-11-23T15:27:07,246 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/A/b3dc789169b547c4830a4c387dc6ac1b is 175, key is test_row_0/A:col10/1732375625702/Put/seqid=0 2024-11-23T15:27:07,249 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742388_1564 (size=31255) 2024-11-23T15:27:07,251 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6a36843bf905,33811,1732375456985 2024-11-23T15:27:07,252 INFO [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] 
handler.UnassignRegionHandler(124): Close d02fcaab8ded3270abaa685bc6ecbc1d 2024-11-23T15:27:07,252 DEBUG [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-23T15:27:07,252 DEBUG [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.HRegion(1681): Closing d02fcaab8ded3270abaa685bc6ecbc1d, disabling compactions & flushes 2024-11-23T15:27:07,252 DEBUG [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.HRegion(1942): waiting for 0 compactions & cache flush to complete for region TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d. 2024-11-23T15:27:07,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169 2024-11-23T15:27:07,650 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=290, memsize=29.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/A/b3dc789169b547c4830a4c387dc6ac1b 2024-11-23T15:27:07,655 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/B/4549e43f72b04e288b7ac219140fd855 is 50, key is test_row_0/B:col10/1732375625702/Put/seqid=0 2024-11-23T15:27:07,658 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742389_1565 (size=12301) 2024-11-23T15:27:07,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169 2024-11-23T15:27:08,058 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=290 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/B/4549e43f72b04e288b7ac219140fd855 2024-11-23T15:27:08,063 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/C/5b5557bd5914455faba2e26e997540d8 is 50, key is test_row_0/C:col10/1732375625702/Put/seqid=0 2024-11-23T15:27:08,066 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742390_1566 (size=12301) 2024-11-23T15:27:08,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169 2024-11-23T15:27:08,467 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=290 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/C/5b5557bd5914455faba2e26e997540d8 2024-11-23T15:27:08,470 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/A/b3dc789169b547c4830a4c387dc6ac1b as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/A/b3dc789169b547c4830a4c387dc6ac1b 2024-11-23T15:27:08,473 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/A/b3dc789169b547c4830a4c387dc6ac1b, entries=150, sequenceid=290, filesize=30.5 K 2024-11-23T15:27:08,473 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/B/4549e43f72b04e288b7ac219140fd855 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/B/4549e43f72b04e288b7ac219140fd855 2024-11-23T15:27:08,476 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/B/4549e43f72b04e288b7ac219140fd855, entries=150, sequenceid=290, filesize=12.0 K 2024-11-23T15:27:08,476 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/C/5b5557bd5914455faba2e26e997540d8 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/C/5b5557bd5914455faba2e26e997540d8 2024-11-23T15:27:08,479 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/C/5b5557bd5914455faba2e26e997540d8, entries=150, sequenceid=290, filesize=12.0 K 2024-11-23T15:27:08,479 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=26.84 KB/27480 for d02fcaab8ded3270abaa685bc6ecbc1d in 1646ms, sequenceid=290, compaction requested=true 2024-11-23T15:27:08,479 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d02fcaab8ded3270abaa685bc6ecbc1d: 2024-11-23T15:27:08,479 INFO [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d. 2024-11-23T15:27:08,479 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d02fcaab8ded3270abaa685bc6ecbc1d:A, priority=-2147483648, current under compaction store size is 1 2024-11-23T15:27:08,479 DEBUG [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d. 
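[Editor's note] The flush (procId 167) and disable (pid 169) recorded above were driven from the test client; the region close at pid=172 is the server-side effect of that disable. As a minimal sketch, assuming the standard HBase 2.x Java client with cluster settings on the classpath and the table name taken from this log, the equivalent Admin calls look roughly like this:

// Minimal client-side sketch of the operations behind procId 167 (FLUSH)
// and pid 169 (DisableTableProcedure) seen above.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushAndDisable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      TableName table = TableName.valueOf("TestAcidGuarantees");
      admin.flush(table);        // waits for the table flush procedure to finish
      admin.disableTable(table); // closes the table's regions before disabling
      System.out.println("disabled: " + admin.isTableDisabled(table));
    }
  }
}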
2024-11-23T15:27:08,479 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T15:27:08,479 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d. because compaction request was cancelled 2024-11-23T15:27:08,479 DEBUG [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d. after waiting 0 ms 2024-11-23T15:27:08,479 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d02fcaab8ded3270abaa685bc6ecbc1d:B, priority=-2147483648, current under compaction store size is 2 2024-11-23T15:27:08,479 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T15:27:08,479 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d02fcaab8ded3270abaa685bc6ecbc1d:A 2024-11-23T15:27:08,479 DEBUG [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d. 2024-11-23T15:27:08,480 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d02fcaab8ded3270abaa685bc6ecbc1d:C, priority=-2147483648, current under compaction store size is 3 2024-11-23T15:27:08,480 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T15:27:08,480 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d. because compaction request was cancelled 2024-11-23T15:27:08,480 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d. 
because compaction request was cancelled 2024-11-23T15:27:08,480 DEBUG [RS:0;6a36843bf905:33811-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d02fcaab8ded3270abaa685bc6ecbc1d:B 2024-11-23T15:27:08,480 DEBUG [RS:0;6a36843bf905:33811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d02fcaab8ded3270abaa685bc6ecbc1d:C 2024-11-23T15:27:08,480 INFO [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.HRegion(2837): Flushing d02fcaab8ded3270abaa685bc6ecbc1d 3/3 column families, dataSize=26.84 KB heapSize=71.06 KB 2024-11-23T15:27:08,480 DEBUG [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d02fcaab8ded3270abaa685bc6ecbc1d, store=A 2024-11-23T15:27:08,480 DEBUG [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:27:08,480 DEBUG [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d02fcaab8ded3270abaa685bc6ecbc1d, store=B 2024-11-23T15:27:08,480 DEBUG [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:27:08,480 DEBUG [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d02fcaab8ded3270abaa685bc6ecbc1d, store=C 2024-11-23T15:27:08,480 DEBUG [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T15:27:08,484 DEBUG [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241123156157dee2ed4e9b9a13e1c264f314b2_d02fcaab8ded3270abaa685bc6ecbc1d is 50, key is test_row_1/A:col10/1732375626941/Put/seqid=0 2024-11-23T15:27:08,487 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742391_1567 (size=9914) 2024-11-23T15:27:08,888 DEBUG [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T15:27:08,891 INFO [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241123156157dee2ed4e9b9a13e1c264f314b2_d02fcaab8ded3270abaa685bc6ecbc1d to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123156157dee2ed4e9b9a13e1c264f314b2_d02fcaab8ded3270abaa685bc6ecbc1d 2024-11-23T15:27:08,891 DEBUG 
[RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/A/ebcd9d653fb741f092e5394ec246b29e, store: [table=TestAcidGuarantees family=A region=d02fcaab8ded3270abaa685bc6ecbc1d] 2024-11-23T15:27:08,892 DEBUG [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/A/ebcd9d653fb741f092e5394ec246b29e is 175, key is test_row_1/A:col10/1732375626941/Put/seqid=0 2024-11-23T15:27:08,895 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742392_1568 (size=22561) 2024-11-23T15:27:09,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169 2024-11-23T15:27:09,296 INFO [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=297, memsize=8.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/A/ebcd9d653fb741f092e5394ec246b29e 2024-11-23T15:27:09,301 DEBUG [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/B/147fe223b5354f68aab311f5cabcc9b5 is 50, key is test_row_1/B:col10/1732375626941/Put/seqid=0 2024-11-23T15:27:09,304 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742393_1569 (size=9857) 2024-11-23T15:27:09,704 INFO [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=297 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/B/147fe223b5354f68aab311f5cabcc9b5 2024-11-23T15:27:09,709 DEBUG [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/C/9f935265be6c4815b9305e5dbeccef62 is 50, key is test_row_1/C:col10/1732375626941/Put/seqid=0 2024-11-23T15:27:09,712 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742394_1570 (size=9857) 2024-11-23T15:27:10,112 INFO [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=297 (bloomFilter=true), 
to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/C/9f935265be6c4815b9305e5dbeccef62 2024-11-23T15:27:10,115 DEBUG [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/A/ebcd9d653fb741f092e5394ec246b29e as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/A/ebcd9d653fb741f092e5394ec246b29e 2024-11-23T15:27:10,118 INFO [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/A/ebcd9d653fb741f092e5394ec246b29e, entries=100, sequenceid=297, filesize=22.0 K 2024-11-23T15:27:10,118 DEBUG [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/B/147fe223b5354f68aab311f5cabcc9b5 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/B/147fe223b5354f68aab311f5cabcc9b5 2024-11-23T15:27:10,121 INFO [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/B/147fe223b5354f68aab311f5cabcc9b5, entries=100, sequenceid=297, filesize=9.6 K 2024-11-23T15:27:10,121 DEBUG [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/.tmp/C/9f935265be6c4815b9305e5dbeccef62 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/C/9f935265be6c4815b9305e5dbeccef62 2024-11-23T15:27:10,124 INFO [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/C/9f935265be6c4815b9305e5dbeccef62, entries=100, sequenceid=297, filesize=9.6 K 2024-11-23T15:27:10,124 INFO [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.HRegion(3040): Finished flush of dataSize ~26.84 KB/27480, heapSize ~71.02 KB/72720, currentSize=0 B/0 for d02fcaab8ded3270abaa685bc6ecbc1d in 1644ms, sequenceid=297, compaction requested=true 2024-11-23T15:27:10,125 DEBUG [StoreCloser-TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.-1 {}] regionserver.HStore(2316): Moving the files 
[hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/A/714f3c6e690349c6937d51468be1a5ac, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/A/269f42b645204ab48878e0586d7b8874, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/A/af06d13b37c14ae5baeed14bb2eb6fdf, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/A/b79cba593a8b4721a5b4004c2e3c1f21, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/A/7059af4257b24aab8cf6f852bcc9977a, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/A/197224406483403aa537a735f53ca20b, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/A/0b03c46e332c4fcdacb12faec47226bc, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/A/9b6458285d844c05b1cdc109ff9f9955, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/A/a90ac3a958fa46e896edff57e00108f7, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/A/ffabb2b2dfee40f4841db0faa3d6b6f3, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/A/2da65abb4c1b459fae00630180784311, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/A/dbcf26a8eea4486d8d3373e9cea7bb75, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/A/e02a75576eaf4a63b356d2436ed49778, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/A/65644bd77b954ff08eaa0119fe6d02e7, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/A/835b912cf6634022a0166b8bfcc4a748, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/A/e4ec56632c11465fa220f8f87fa418df, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/A/00b595a827964b71a8e109e920ea7336] to archive 2024-11-23T15:27:10,126 DEBUG [StoreCloser-TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-23T15:27:10,127 DEBUG [StoreCloser-TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/A/714f3c6e690349c6937d51468be1a5ac to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/A/714f3c6e690349c6937d51468be1a5ac 2024-11-23T15:27:10,128 DEBUG [StoreCloser-TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/A/269f42b645204ab48878e0586d7b8874 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/A/269f42b645204ab48878e0586d7b8874 2024-11-23T15:27:10,128 DEBUG [StoreCloser-TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/A/af06d13b37c14ae5baeed14bb2eb6fdf to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/A/af06d13b37c14ae5baeed14bb2eb6fdf 2024-11-23T15:27:10,129 DEBUG [StoreCloser-TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/A/b79cba593a8b4721a5b4004c2e3c1f21 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/A/b79cba593a8b4721a5b4004c2e3c1f21 2024-11-23T15:27:10,130 DEBUG [StoreCloser-TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/A/7059af4257b24aab8cf6f852bcc9977a to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/A/7059af4257b24aab8cf6f852bcc9977a 2024-11-23T15:27:10,131 DEBUG [StoreCloser-TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/A/197224406483403aa537a735f53ca20b to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/A/197224406483403aa537a735f53ca20b 2024-11-23T15:27:10,132 DEBUG [StoreCloser-TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/A/0b03c46e332c4fcdacb12faec47226bc to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/A/0b03c46e332c4fcdacb12faec47226bc 2024-11-23T15:27:10,132 DEBUG [StoreCloser-TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/A/9b6458285d844c05b1cdc109ff9f9955 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/A/9b6458285d844c05b1cdc109ff9f9955 2024-11-23T15:27:10,133 DEBUG [StoreCloser-TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/A/a90ac3a958fa46e896edff57e00108f7 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/A/a90ac3a958fa46e896edff57e00108f7 2024-11-23T15:27:10,134 DEBUG [StoreCloser-TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/A/ffabb2b2dfee40f4841db0faa3d6b6f3 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/A/ffabb2b2dfee40f4841db0faa3d6b6f3 2024-11-23T15:27:10,135 DEBUG [StoreCloser-TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/A/2da65abb4c1b459fae00630180784311 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/A/2da65abb4c1b459fae00630180784311 2024-11-23T15:27:10,135 DEBUG [StoreCloser-TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/A/dbcf26a8eea4486d8d3373e9cea7bb75 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/A/dbcf26a8eea4486d8d3373e9cea7bb75 2024-11-23T15:27:10,136 DEBUG [StoreCloser-TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/A/e02a75576eaf4a63b356d2436ed49778 to 
hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/A/e02a75576eaf4a63b356d2436ed49778 2024-11-23T15:27:10,137 DEBUG [StoreCloser-TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/A/65644bd77b954ff08eaa0119fe6d02e7 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/A/65644bd77b954ff08eaa0119fe6d02e7 2024-11-23T15:27:10,138 DEBUG [StoreCloser-TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/A/835b912cf6634022a0166b8bfcc4a748 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/A/835b912cf6634022a0166b8bfcc4a748 2024-11-23T15:27:10,138 DEBUG [StoreCloser-TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/A/e4ec56632c11465fa220f8f87fa418df to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/A/e4ec56632c11465fa220f8f87fa418df 2024-11-23T15:27:10,139 DEBUG [StoreCloser-TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/A/00b595a827964b71a8e109e920ea7336 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/A/00b595a827964b71a8e109e920ea7336 2024-11-23T15:27:10,140 DEBUG [StoreCloser-TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/B/5e9d3eb70e5e4f9c90492ba70cf1dd45, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/B/f25061395c2d4398b67fa5f0a0a3d9c4, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/B/b83522acadd44acf82c8a28b8da64ff7, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/B/ecc739201ea04d1e9d3a3bac29bde2d1, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/B/ff4310f88c924b05bf4bdb774ae6e6eb, 
hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/B/83fbda0e71604c71aa22d525dcfd799d, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/B/590e0c43d941418c8be6fbaa69cd8813, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/B/7cdf2fab64944efbbd226f7b210d4027, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/B/af57329485fe41c09cbf66f920caa2cc, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/B/a914d376348541e0b17e9f2c02fd5322, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/B/6f34d7670cf54394a6808158cc92c446, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/B/6a1228eb2c584a55aca8fbf17af033a4, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/B/2f2506283c994a4aa2e389f3f1d09b88, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/B/2a4ab2a9185e4b0f8b09cc40f819a4c0, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/B/b7fb5d3b002c4c5c9abcce9c6fb99811, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/B/f8e7374903074749aca7485da6b72547, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/B/bf44a4d4ce174e959ab3c0f0cd154082] to archive 2024-11-23T15:27:10,141 DEBUG [StoreCloser-TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-23T15:27:10,142 DEBUG [StoreCloser-TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/B/5e9d3eb70e5e4f9c90492ba70cf1dd45 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/B/5e9d3eb70e5e4f9c90492ba70cf1dd45 2024-11-23T15:27:10,143 DEBUG [StoreCloser-TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/B/f25061395c2d4398b67fa5f0a0a3d9c4 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/B/f25061395c2d4398b67fa5f0a0a3d9c4 2024-11-23T15:27:10,143 DEBUG [StoreCloser-TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/B/b83522acadd44acf82c8a28b8da64ff7 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/B/b83522acadd44acf82c8a28b8da64ff7 2024-11-23T15:27:10,144 DEBUG [StoreCloser-TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/B/ecc739201ea04d1e9d3a3bac29bde2d1 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/B/ecc739201ea04d1e9d3a3bac29bde2d1 2024-11-23T15:27:10,145 DEBUG [StoreCloser-TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/B/ff4310f88c924b05bf4bdb774ae6e6eb to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/B/ff4310f88c924b05bf4bdb774ae6e6eb 2024-11-23T15:27:10,146 DEBUG [StoreCloser-TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/B/83fbda0e71604c71aa22d525dcfd799d to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/B/83fbda0e71604c71aa22d525dcfd799d 2024-11-23T15:27:10,146 DEBUG [StoreCloser-TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/B/590e0c43d941418c8be6fbaa69cd8813 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/B/590e0c43d941418c8be6fbaa69cd8813 2024-11-23T15:27:10,147 DEBUG [StoreCloser-TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/B/7cdf2fab64944efbbd226f7b210d4027 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/B/7cdf2fab64944efbbd226f7b210d4027 2024-11-23T15:27:10,148 DEBUG [StoreCloser-TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/B/af57329485fe41c09cbf66f920caa2cc to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/B/af57329485fe41c09cbf66f920caa2cc 2024-11-23T15:27:10,149 DEBUG [StoreCloser-TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/B/a914d376348541e0b17e9f2c02fd5322 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/B/a914d376348541e0b17e9f2c02fd5322 2024-11-23T15:27:10,150 DEBUG [StoreCloser-TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/B/6f34d7670cf54394a6808158cc92c446 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/B/6f34d7670cf54394a6808158cc92c446 2024-11-23T15:27:10,151 DEBUG [StoreCloser-TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/B/6a1228eb2c584a55aca8fbf17af033a4 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/B/6a1228eb2c584a55aca8fbf17af033a4 2024-11-23T15:27:10,152 DEBUG [StoreCloser-TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/B/2f2506283c994a4aa2e389f3f1d09b88 to 
hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/B/2f2506283c994a4aa2e389f3f1d09b88 2024-11-23T15:27:10,153 DEBUG [StoreCloser-TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/B/2a4ab2a9185e4b0f8b09cc40f819a4c0 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/B/2a4ab2a9185e4b0f8b09cc40f819a4c0 2024-11-23T15:27:10,154 DEBUG [StoreCloser-TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/B/b7fb5d3b002c4c5c9abcce9c6fb99811 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/B/b7fb5d3b002c4c5c9abcce9c6fb99811 2024-11-23T15:27:10,155 DEBUG [StoreCloser-TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/B/f8e7374903074749aca7485da6b72547 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/B/f8e7374903074749aca7485da6b72547 2024-11-23T15:27:10,156 DEBUG [StoreCloser-TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/B/bf44a4d4ce174e959ab3c0f0cd154082 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/B/bf44a4d4ce174e959ab3c0f0cd154082 2024-11-23T15:27:10,157 DEBUG [StoreCloser-TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/C/f7b81576582648a7b6dcfaf441d990b4, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/C/dda747904cd54f38831710ed3f04a235, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/C/43f5fc425eb643b8bce2b829e1a8ecba, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/C/d24a3ad18c0447bf8fef1d89a4d8bb9e, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/C/db0f0d73cb0b4caa9d80aebf4497d5b9, 
hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/C/2c2f49420add4d6e9976452ae43b3b1e, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/C/15c281dea3fd45e0bebed93c544c1827, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/C/25b2655a548b4782b46d33d829a9fb17, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/C/f56e16dbe1904944a5749768dcdd1ea6, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/C/c9298c0ac12840edb9ad2ff6cc93b8a6, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/C/842081bb845d43abbc06fa49632e3d97, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/C/0677011a7e0d4ddd94d7375bd15860b4, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/C/11579e633e2643c581b81305d31b9c6d, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/C/d9dea370ea4d4c718c0201b64c08f0ea, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/C/eaa12009c5e548ceae211406b56557df, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/C/bbb48989f0864badb8d9a87d6fd5ff85, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/C/cb548bc59dac4760a310297047a990a0] to archive 2024-11-23T15:27:10,157 DEBUG [StoreCloser-TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-23T15:27:10,159 DEBUG [StoreCloser-TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/C/f7b81576582648a7b6dcfaf441d990b4 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/C/f7b81576582648a7b6dcfaf441d990b4 2024-11-23T15:27:10,159 DEBUG [StoreCloser-TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/C/dda747904cd54f38831710ed3f04a235 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/C/dda747904cd54f38831710ed3f04a235 2024-11-23T15:27:10,160 DEBUG [StoreCloser-TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/C/43f5fc425eb643b8bce2b829e1a8ecba to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/C/43f5fc425eb643b8bce2b829e1a8ecba 2024-11-23T15:27:10,161 DEBUG [StoreCloser-TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/C/d24a3ad18c0447bf8fef1d89a4d8bb9e to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/C/d24a3ad18c0447bf8fef1d89a4d8bb9e 2024-11-23T15:27:10,162 DEBUG [StoreCloser-TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/C/db0f0d73cb0b4caa9d80aebf4497d5b9 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/C/db0f0d73cb0b4caa9d80aebf4497d5b9 2024-11-23T15:27:10,163 DEBUG [StoreCloser-TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/C/2c2f49420add4d6e9976452ae43b3b1e to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/C/2c2f49420add4d6e9976452ae43b3b1e 2024-11-23T15:27:10,164 DEBUG [StoreCloser-TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/C/15c281dea3fd45e0bebed93c544c1827 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/C/15c281dea3fd45e0bebed93c544c1827 2024-11-23T15:27:10,165 DEBUG [StoreCloser-TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/C/25b2655a548b4782b46d33d829a9fb17 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/C/25b2655a548b4782b46d33d829a9fb17 2024-11-23T15:27:10,166 DEBUG [StoreCloser-TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/C/f56e16dbe1904944a5749768dcdd1ea6 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/C/f56e16dbe1904944a5749768dcdd1ea6 2024-11-23T15:27:10,166 DEBUG [StoreCloser-TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/C/c9298c0ac12840edb9ad2ff6cc93b8a6 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/C/c9298c0ac12840edb9ad2ff6cc93b8a6 2024-11-23T15:27:10,167 DEBUG [StoreCloser-TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/C/842081bb845d43abbc06fa49632e3d97 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/C/842081bb845d43abbc06fa49632e3d97 2024-11-23T15:27:10,168 DEBUG [StoreCloser-TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/C/0677011a7e0d4ddd94d7375bd15860b4 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/C/0677011a7e0d4ddd94d7375bd15860b4 2024-11-23T15:27:10,169 DEBUG [StoreCloser-TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/C/11579e633e2643c581b81305d31b9c6d to 
hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/C/11579e633e2643c581b81305d31b9c6d 2024-11-23T15:27:10,169 DEBUG [StoreCloser-TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/C/d9dea370ea4d4c718c0201b64c08f0ea to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/C/d9dea370ea4d4c718c0201b64c08f0ea 2024-11-23T15:27:10,170 DEBUG [StoreCloser-TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/C/eaa12009c5e548ceae211406b56557df to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/C/eaa12009c5e548ceae211406b56557df 2024-11-23T15:27:10,171 DEBUG [StoreCloser-TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/C/bbb48989f0864badb8d9a87d6fd5ff85 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/C/bbb48989f0864badb8d9a87d6fd5ff85 2024-11-23T15:27:10,172 DEBUG [StoreCloser-TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/C/cb548bc59dac4760a310297047a990a0 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/C/cb548bc59dac4760a310297047a990a0 2024-11-23T15:27:10,175 DEBUG [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/recovered.edits/300.seqid, newMaxSeqId=300, maxSeqId=4 2024-11-23T15:27:10,175 INFO [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d. 
2024-11-23T15:27:10,175 DEBUG [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.HRegion(1635): Region close journal for d02fcaab8ded3270abaa685bc6ecbc1d: 2024-11-23T15:27:10,176 INFO [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] handler.UnassignRegionHandler(170): Closed d02fcaab8ded3270abaa685bc6ecbc1d 2024-11-23T15:27:10,177 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=171 updating hbase:meta row=d02fcaab8ded3270abaa685bc6ecbc1d, regionState=CLOSED 2024-11-23T15:27:10,178 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=172, resume processing ppid=171 2024-11-23T15:27:10,178 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=172, ppid=171, state=SUCCESS; CloseRegionProcedure d02fcaab8ded3270abaa685bc6ecbc1d, server=6a36843bf905,33811,1732375456985 in 3.0770 sec 2024-11-23T15:27:10,179 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=171, resume processing ppid=170 2024-11-23T15:27:10,179 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=171, ppid=170, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=d02fcaab8ded3270abaa685bc6ecbc1d, UNASSIGN in 3.0790 sec 2024-11-23T15:27:10,180 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=170, resume processing ppid=169 2024-11-23T15:27:10,180 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=170, ppid=169, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 3.0810 sec 2024-11-23T15:27:10,181 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732375630181"}]},"ts":"1732375630181"} 2024-11-23T15:27:10,182 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-11-23T15:27:10,183 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-11-23T15:27:10,184 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=169, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 3.0910 sec 2024-11-23T15:27:11,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169 2024-11-23T15:27:11,199 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 169 completed 2024-11-23T15:27:11,199 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-11-23T15:27:11,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] procedure2.ProcedureExecutor(1098): Stored pid=173, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-11-23T15:27:11,200 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=173, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-23T15:27:11,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-11-23T15:27:11,201 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(115): Deleting regions from 
filesystem for pid=173, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-23T15:27:11,203 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d 2024-11-23T15:27:11,204 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/A, FileablePath, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/B, FileablePath, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/C, FileablePath, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/recovered.edits] 2024-11-23T15:27:11,206 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/A/934e362ecc4e4c1188cd156d65424cc0 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/A/934e362ecc4e4c1188cd156d65424cc0 2024-11-23T15:27:11,207 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/A/b3dc789169b547c4830a4c387dc6ac1b to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/A/b3dc789169b547c4830a4c387dc6ac1b 2024-11-23T15:27:11,208 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/A/ebcd9d653fb741f092e5394ec246b29e to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/A/ebcd9d653fb741f092e5394ec246b29e 2024-11-23T15:27:11,208 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/A/fc02d474846746df81c3a58a666bb587 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/A/fc02d474846746df81c3a58a666bb587 2024-11-23T15:27:11,210 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/B/147fe223b5354f68aab311f5cabcc9b5 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/B/147fe223b5354f68aab311f5cabcc9b5 
2024-11-23T15:27:11,211 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/B/4549e43f72b04e288b7ac219140fd855 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/B/4549e43f72b04e288b7ac219140fd855 2024-11-23T15:27:11,212 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/B/5cba13d1c35a4710856ae7575a24bcbe to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/B/5cba13d1c35a4710856ae7575a24bcbe 2024-11-23T15:27:11,213 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/B/ec543413787948078a4ab1d57672f31d to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/B/ec543413787948078a4ab1d57672f31d 2024-11-23T15:27:11,215 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/C/3440b3a26e374830b445645d6492f483 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/C/3440b3a26e374830b445645d6492f483 2024-11-23T15:27:11,215 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/C/5b5557bd5914455faba2e26e997540d8 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/C/5b5557bd5914455faba2e26e997540d8 2024-11-23T15:27:11,216 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/C/9328dfdeaab84442b9874165f69479c5 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/C/9328dfdeaab84442b9874165f69479c5 2024-11-23T15:27:11,217 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/C/9f935265be6c4815b9305e5dbeccef62 to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/C/9f935265be6c4815b9305e5dbeccef62 2024-11-23T15:27:11,219 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/recovered.edits/300.seqid to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d/recovered.edits/300.seqid 2024-11-23T15:27:11,219 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/default/TestAcidGuarantees/d02fcaab8ded3270abaa685bc6ecbc1d 2024-11-23T15:27:11,219 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-11-23T15:27:11,220 DEBUG [PEWorker-4 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-11-23T15:27:11,220 DEBUG [PEWorker-4 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A] 2024-11-23T15:27:11,222 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411230f4e08d3372c431681038c0c7cb946cf_d02fcaab8ded3270abaa685bc6ecbc1d to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411230f4e08d3372c431681038c0c7cb946cf_d02fcaab8ded3270abaa685bc6ecbc1d 2024-11-23T15:27:11,223 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123156157dee2ed4e9b9a13e1c264f314b2_d02fcaab8ded3270abaa685bc6ecbc1d to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123156157dee2ed4e9b9a13e1c264f314b2_d02fcaab8ded3270abaa685bc6ecbc1d 2024-11-23T15:27:11,224 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411232a045e5f8beb466487e92d24f4b19334_d02fcaab8ded3270abaa685bc6ecbc1d to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411232a045e5f8beb466487e92d24f4b19334_d02fcaab8ded3270abaa685bc6ecbc1d 2024-11-23T15:27:11,225 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411233225006cda194dfa8479f8200769ebc3_d02fcaab8ded3270abaa685bc6ecbc1d to 
hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411233225006cda194dfa8479f8200769ebc3_d02fcaab8ded3270abaa685bc6ecbc1d 2024-11-23T15:27:11,225 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411233d62c4f4936e4179b815be5fdb4f3718_d02fcaab8ded3270abaa685bc6ecbc1d to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411233d62c4f4936e4179b815be5fdb4f3718_d02fcaab8ded3270abaa685bc6ecbc1d 2024-11-23T15:27:11,226 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411233fb22b2564734b7c9ae402139e6af1e6_d02fcaab8ded3270abaa685bc6ecbc1d to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411233fb22b2564734b7c9ae402139e6af1e6_d02fcaab8ded3270abaa685bc6ecbc1d 2024-11-23T15:27:11,227 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112343d1db7ccb47467d9c9f61a964bdd39b_d02fcaab8ded3270abaa685bc6ecbc1d to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112343d1db7ccb47467d9c9f61a964bdd39b_d02fcaab8ded3270abaa685bc6ecbc1d 2024-11-23T15:27:11,228 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411234f9bbbb123394774bba9e7daf4ad8f3e_d02fcaab8ded3270abaa685bc6ecbc1d to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411234f9bbbb123394774bba9e7daf4ad8f3e_d02fcaab8ded3270abaa685bc6ecbc1d 2024-11-23T15:27:11,229 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112354755841ccb6422893a6faa29cf7e30e_d02fcaab8ded3270abaa685bc6ecbc1d to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112354755841ccb6422893a6faa29cf7e30e_d02fcaab8ded3270abaa685bc6ecbc1d 2024-11-23T15:27:11,229 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112355add34463d148e790519c8623228ec5_d02fcaab8ded3270abaa685bc6ecbc1d to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112355add34463d148e790519c8623228ec5_d02fcaab8ded3270abaa685bc6ecbc1d 2024-11-23T15:27:11,232 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411237efc3ffcb7034bc291a484ffad0fa593_d02fcaab8ded3270abaa685bc6ecbc1d to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411237efc3ffcb7034bc291a484ffad0fa593_d02fcaab8ded3270abaa685bc6ecbc1d 2024-11-23T15:27:11,233 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112389f9dc607588482b911c74cff84b2fb3_d02fcaab8ded3270abaa685bc6ecbc1d to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112389f9dc607588482b911c74cff84b2fb3_d02fcaab8ded3270abaa685bc6ecbc1d 2024-11-23T15:27:11,234 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411238d577b2a587b4b7ebf35b85a4ac01fb5_d02fcaab8ded3270abaa685bc6ecbc1d to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411238d577b2a587b4b7ebf35b85a4ac01fb5_d02fcaab8ded3270abaa685bc6ecbc1d 2024-11-23T15:27:11,234 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411239ab7bf137eef4b59a5bb4397228000be_d02fcaab8ded3270abaa685bc6ecbc1d to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411239ab7bf137eef4b59a5bb4397228000be_d02fcaab8ded3270abaa685bc6ecbc1d 2024-11-23T15:27:11,235 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123d91e2e99e04944fc87bf9f50454087f8_d02fcaab8ded3270abaa685bc6ecbc1d to 
hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123d91e2e99e04944fc87bf9f50454087f8_d02fcaab8ded3270abaa685bc6ecbc1d 2024-11-23T15:27:11,236 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123e4f3a6c1fc944cc8b6e03ae302fc3b81_d02fcaab8ded3270abaa685bc6ecbc1d to hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123e4f3a6c1fc944cc8b6e03ae302fc3b81_d02fcaab8ded3270abaa685bc6ecbc1d 2024-11-23T15:27:11,236 DEBUG [PEWorker-4 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-11-23T15:27:11,238 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=173, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-23T15:27:11,240 WARN [PEWorker-4 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-11-23T15:27:11,241 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 2024-11-23T15:27:11,242 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=173, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-23T15:27:11,242 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 2024-11-23T15:27:11,242 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732375631242"}]},"ts":"9223372036854775807"} 2024-11-23T15:27:11,244 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-11-23T15:27:11,244 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => d02fcaab8ded3270abaa685bc6ecbc1d, NAME => 'TestAcidGuarantees,,1732375603393.d02fcaab8ded3270abaa685bc6ecbc1d.', STARTKEY => '', ENDKEY => ''}] 2024-11-23T15:27:11,244 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 
2024-11-23T15:27:11,244 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732375631244"}]},"ts":"9223372036854775807"} 2024-11-23T15:27:11,245 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-11-23T15:27:11,247 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(133): Finished pid=173, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-23T15:27:11,248 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=173, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 48 msec 2024-11-23T15:27:11,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35555 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-11-23T15:27:11,302 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 173 completed 2024-11-23T15:27:11,311 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testMobGetAtomicity Thread=239 (was 239), OpenFileDescriptor=449 (was 451), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=304 (was 277) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=3663 (was 3675) 2024-11-23T15:27:11,311 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1340): Shutting down minicluster 2024-11-23T15:27:11,311 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-11-23T15:27:11,311 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5e83c466 to 127.0.0.1:62881 2024-11-23T15:27:11,311 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T15:27:11,311 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-23T15:27:11,311 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=633883069, stopped=false 2024-11-23T15:27:11,312 INFO [Time-limited test {}] master.ServerManager(987): Cluster shutdown requested of master=6a36843bf905,35555,1732375456260 2024-11-23T15:27:11,313 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35555-0x10024fb5fcd0000, quorum=127.0.0.1:62881, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-23T15:27:11,313 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33811-0x10024fb5fcd0001, quorum=127.0.0.1:62881, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-23T15:27:11,313 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35555-0x10024fb5fcd0000, quorum=127.0.0.1:62881, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T15:27:11,313 INFO [Time-limited test {}] procedure2.ProcedureExecutor(700): Stopping 2024-11-23T15:27:11,313 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33811-0x10024fb5fcd0001, quorum=127.0.0.1:62881, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T15:27:11,314 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T15:27:11,314 INFO 
[Time-limited test {}] regionserver.HRegionServer(2561): ***** STOPPING region server '6a36843bf905,33811,1732375456985' ***** 2024-11-23T15:27:11,314 INFO [Time-limited test {}] regionserver.HRegionServer(2575): STOPPED: Shutdown requested 2024-11-23T15:27:11,314 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:35555-0x10024fb5fcd0000, quorum=127.0.0.1:62881, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-23T15:27:11,314 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:33811-0x10024fb5fcd0001, quorum=127.0.0.1:62881, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-23T15:27:11,314 INFO [RS:0;6a36843bf905:33811 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-23T15:27:11,314 INFO [RS:0;6a36843bf905:33811 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-23T15:27:11,314 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(373): MemStoreFlusher.0 exiting 2024-11-23T15:27:11,314 INFO [RS:0;6a36843bf905:33811 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-23T15:27:11,314 INFO [RS:0;6a36843bf905:33811 {}] regionserver.HRegionServer(3579): Received CLOSE for 7c4a395faaf6c8f523e4a2ccca6ed0d7 2024-11-23T15:27:11,315 INFO [RS:0;6a36843bf905:33811 {}] regionserver.HRegionServer(1224): stopping server 6a36843bf905,33811,1732375456985 2024-11-23T15:27:11,315 DEBUG [RS:0;6a36843bf905:33811 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T15:27:11,315 INFO [RS:0;6a36843bf905:33811 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-23T15:27:11,315 INFO [RS:0;6a36843bf905:33811 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-23T15:27:11,315 INFO [RS:0;6a36843bf905:33811 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-23T15:27:11,315 INFO [RS:0;6a36843bf905:33811 {}] regionserver.HRegionServer(3579): Received CLOSE for 1588230740 2024-11-23T15:27:11,315 DEBUG [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing 7c4a395faaf6c8f523e4a2ccca6ed0d7, disabling compactions & flushes 2024-11-23T15:27:11,315 INFO [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region hbase:namespace,,1732375460179.7c4a395faaf6c8f523e4a2ccca6ed0d7. 2024-11-23T15:27:11,315 DEBUG [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1732375460179.7c4a395faaf6c8f523e4a2ccca6ed0d7. 2024-11-23T15:27:11,315 DEBUG [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1732375460179.7c4a395faaf6c8f523e4a2ccca6ed0d7. after waiting 0 ms 2024-11-23T15:27:11,315 DEBUG [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1732375460179.7c4a395faaf6c8f523e4a2ccca6ed0d7. 
2024-11-23T15:27:11,315 INFO [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2837): Flushing 7c4a395faaf6c8f523e4a2ccca6ed0d7 1/1 column families, dataSize=78 B heapSize=488 B 2024-11-23T15:27:11,315 INFO [RS:0;6a36843bf905:33811 {}] regionserver.HRegionServer(1599): Waiting on 2 regions to close 2024-11-23T15:27:11,315 DEBUG [RS:0;6a36843bf905:33811 {}] regionserver.HRegionServer(1603): Online Regions={1588230740=hbase:meta,,1.1588230740, 7c4a395faaf6c8f523e4a2ccca6ed0d7=hbase:namespace,,1732375460179.7c4a395faaf6c8f523e4a2ccca6ed0d7.} 2024-11-23T15:27:11,315 DEBUG [RS_CLOSE_META-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-11-23T15:27:11,316 INFO [RS_CLOSE_META-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-11-23T15:27:11,316 DEBUG [RS_CLOSE_META-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-11-23T15:27:11,316 DEBUG [RS_CLOSE_META-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-23T15:27:11,316 DEBUG [RS_CLOSE_META-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-11-23T15:27:11,316 INFO [RS_CLOSE_META-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2837): Flushing 1588230740 3/3 column families, dataSize=20.55 KB heapSize=35.87 KB 2024-11-23T15:27:11,316 DEBUG [RS:0;6a36843bf905:33811 {}] regionserver.HRegionServer(1629): Waiting on 1588230740, 7c4a395faaf6c8f523e4a2ccca6ed0d7 2024-11-23T15:27:11,331 DEBUG [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/hbase/namespace/7c4a395faaf6c8f523e4a2ccca6ed0d7/.tmp/info/1e946c8e8ba64705a77103c1bcfed27e is 45, key is default/info:d/1732375461532/Put/seqid=0 2024-11-23T15:27:11,335 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742395_1571 (size=5037) 2024-11-23T15:27:11,338 DEBUG [RS_CLOSE_META-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/hbase/meta/1588230740/.tmp/info/eb085f239cd84cfc8ee4c74f6694dbf0 is 143, key is hbase:namespace,,1732375460179.7c4a395faaf6c8f523e4a2ccca6ed0d7./info:regioninfo/1732375461411/Put/seqid=0 2024-11-23T15:27:11,341 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742396_1572 (size=7725) 2024-11-23T15:27:11,355 INFO [regionserver/6a36843bf905:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-23T15:27:11,516 DEBUG [RS:0;6a36843bf905:33811 {}] regionserver.HRegionServer(1629): Waiting on 1588230740, 7c4a395faaf6c8f523e4a2ccca6ed0d7 2024-11-23T15:27:11,717 DEBUG [RS:0;6a36843bf905:33811 {}] regionserver.HRegionServer(1629): Waiting on 1588230740, 7c4a395faaf6c8f523e4a2ccca6ed0d7 2024-11-23T15:27:11,735 INFO 
[RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=78 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/hbase/namespace/7c4a395faaf6c8f523e4a2ccca6ed0d7/.tmp/info/1e946c8e8ba64705a77103c1bcfed27e 2024-11-23T15:27:11,739 DEBUG [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/hbase/namespace/7c4a395faaf6c8f523e4a2ccca6ed0d7/.tmp/info/1e946c8e8ba64705a77103c1bcfed27e as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/hbase/namespace/7c4a395faaf6c8f523e4a2ccca6ed0d7/info/1e946c8e8ba64705a77103c1bcfed27e 2024-11-23T15:27:11,741 INFO [RS_CLOSE_META-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/hbase/meta/1588230740/.tmp/info/eb085f239cd84cfc8ee4c74f6694dbf0 2024-11-23T15:27:11,742 INFO [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/hbase/namespace/7c4a395faaf6c8f523e4a2ccca6ed0d7/info/1e946c8e8ba64705a77103c1bcfed27e, entries=2, sequenceid=6, filesize=4.9 K 2024-11-23T15:27:11,743 INFO [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3040): Finished flush of dataSize ~78 B/78, heapSize ~472 B/472, currentSize=0 B/0 for 7c4a395faaf6c8f523e4a2ccca6ed0d7 in 428ms, sequenceid=6, compaction requested=false 2024-11-23T15:27:11,746 DEBUG [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/hbase/namespace/7c4a395faaf6c8f523e4a2ccca6ed0d7/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-23T15:27:11,746 INFO [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1922): Closed hbase:namespace,,1732375460179.7c4a395faaf6c8f523e4a2ccca6ed0d7. 2024-11-23T15:27:11,746 DEBUG [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for 7c4a395faaf6c8f523e4a2ccca6ed0d7: 2024-11-23T15:27:11,746 DEBUG [RS_CLOSE_REGION-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed hbase:namespace,,1732375460179.7c4a395faaf6c8f523e4a2ccca6ed0d7. 
2024-11-23T15:27:11,759 DEBUG [RS_CLOSE_META-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/hbase/meta/1588230740/.tmp/rep_barrier/df18c77a5e824a2f9cd68a82c11ec0eb is 102, key is TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d./rep_barrier:/1732375488076/DeleteFamily/seqid=0 2024-11-23T15:27:11,761 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742397_1573 (size=6025) 2024-11-23T15:27:11,879 INFO [regionserver/6a36843bf905:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-23T15:27:11,879 INFO [regionserver/6a36843bf905:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-23T15:27:11,917 DEBUG [RS:0;6a36843bf905:33811 {}] regionserver.HRegionServer(1629): Waiting on 1588230740 2024-11-23T15:27:12,117 DEBUG [RS:0;6a36843bf905:33811 {}] regionserver.HRegionServer(1629): Waiting on 1588230740 2024-11-23T15:27:12,162 INFO [RS_CLOSE_META-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=588 B at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/hbase/meta/1588230740/.tmp/rep_barrier/df18c77a5e824a2f9cd68a82c11ec0eb 2024-11-23T15:27:12,180 DEBUG [RS_CLOSE_META-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/hbase/meta/1588230740/.tmp/table/6f7525aeca08469dbd172f12b69aba7e is 96, key is TestAcidGuarantees,,1732375461727.d6bd711ee7b1117306956b276de6b58d./table:/1732375488076/DeleteFamily/seqid=0 2024-11-23T15:27:12,183 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742398_1574 (size=5942) 2024-11-23T15:27:12,317 INFO [RS:0;6a36843bf905:33811 {}] regionserver.HRegionServer(1599): Waiting on 1 regions to close 2024-11-23T15:27:12,317 DEBUG [RS:0;6a36843bf905:33811 {}] regionserver.HRegionServer(1603): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-23T15:27:12,318 DEBUG [RS:0;6a36843bf905:33811 {}] regionserver.HRegionServer(1629): Waiting on 1588230740 2024-11-23T15:27:12,518 DEBUG [RS:0;6a36843bf905:33811 {}] regionserver.HRegionServer(1629): Waiting on 1588230740 2024-11-23T15:27:12,583 INFO [RS_CLOSE_META-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.08 KB at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/hbase/meta/1588230740/.tmp/table/6f7525aeca08469dbd172f12b69aba7e 2024-11-23T15:27:12,587 DEBUG [RS_CLOSE_META-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/hbase/meta/1588230740/.tmp/info/eb085f239cd84cfc8ee4c74f6694dbf0 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/hbase/meta/1588230740/info/eb085f239cd84cfc8ee4c74f6694dbf0 2024-11-23T15:27:12,589 INFO [RS_CLOSE_META-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_META}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/hbase/meta/1588230740/info/eb085f239cd84cfc8ee4c74f6694dbf0, entries=22, sequenceid=93, filesize=7.5 K 2024-11-23T15:27:12,590 DEBUG [RS_CLOSE_META-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/hbase/meta/1588230740/.tmp/rep_barrier/df18c77a5e824a2f9cd68a82c11ec0eb as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/hbase/meta/1588230740/rep_barrier/df18c77a5e824a2f9cd68a82c11ec0eb 2024-11-23T15:27:12,592 INFO [RS_CLOSE_META-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/hbase/meta/1588230740/rep_barrier/df18c77a5e824a2f9cd68a82c11ec0eb, entries=6, sequenceid=93, filesize=5.9 K 2024-11-23T15:27:12,593 DEBUG [RS_CLOSE_META-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/hbase/meta/1588230740/.tmp/table/6f7525aeca08469dbd172f12b69aba7e as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/hbase/meta/1588230740/table/6f7525aeca08469dbd172f12b69aba7e 2024-11-23T15:27:12,595 INFO [RS_CLOSE_META-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/hbase/meta/1588230740/table/6f7525aeca08469dbd172f12b69aba7e, entries=9, sequenceid=93, filesize=5.8 K 2024-11-23T15:27:12,596 INFO [RS_CLOSE_META-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3040): Finished flush of dataSize ~20.55 KB/21040, heapSize ~35.82 KB/36680, currentSize=0 B/0 for 1588230740 in 1280ms, sequenceid=93, compaction requested=false 2024-11-23T15:27:12,599 DEBUG [RS_CLOSE_META-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/data/hbase/meta/1588230740/recovered.edits/96.seqid, newMaxSeqId=96, maxSeqId=1 2024-11-23T15:27:12,599 DEBUG [RS_CLOSE_META-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-23T15:27:12,600 INFO [RS_CLOSE_META-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740 2024-11-23T15:27:12,600 DEBUG [RS_CLOSE_META-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1635): Region close journal for 1588230740: 2024-11-23T15:27:12,600 DEBUG [RS_CLOSE_META-regionserver/6a36843bf905:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-23T15:27:12,718 INFO [RS:0;6a36843bf905:33811 {}] regionserver.HRegionServer(1250): stopping server 6a36843bf905,33811,1732375456985; all regions closed. 
2024-11-23T15:27:12,722 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073741834_1010 (size=26050) 2024-11-23T15:27:12,724 DEBUG [RS:0;6a36843bf905:33811 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/oldWALs 2024-11-23T15:27:12,724 INFO [RS:0;6a36843bf905:33811 {}] wal.AbstractFSWAL(1074): Closed WAL: AsyncFSWAL 6a36843bf905%2C33811%2C1732375456985.meta:.meta(num 1732375459925) 2024-11-23T15:27:12,726 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073741832_1008 (size=13148899) 2024-11-23T15:27:12,728 DEBUG [RS:0;6a36843bf905:33811 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/oldWALs 2024-11-23T15:27:12,728 INFO [RS:0;6a36843bf905:33811 {}] wal.AbstractFSWAL(1074): Closed WAL: AsyncFSWAL 6a36843bf905%2C33811%2C1732375456985:(num 1732375458960) 2024-11-23T15:27:12,728 DEBUG [RS:0;6a36843bf905:33811 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T15:27:12,728 INFO [RS:0;6a36843bf905:33811 {}] regionserver.LeaseManager(133): Closed leases 2024-11-23T15:27:12,728 INFO [RS:0;6a36843bf905:33811 {}] hbase.ChoreService(370): Chore service for: regionserver/6a36843bf905:0 had [ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-23T15:27:12,728 INFO [regionserver/6a36843bf905:0.logRoller {}] wal.AbstractWALRoller(243): LogRoller exiting. 2024-11-23T15:27:12,729 INFO [RS:0;6a36843bf905:33811 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:33811 2024-11-23T15:27:12,733 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33811-0x10024fb5fcd0001, quorum=127.0.0.1:62881, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/6a36843bf905,33811,1732375456985 2024-11-23T15:27:12,733 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35555-0x10024fb5fcd0000, quorum=127.0.0.1:62881, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-23T15:27:12,734 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [6a36843bf905,33811,1732375456985] 2024-11-23T15:27:12,734 DEBUG [RegionServerTracker-0 {}] master.DeadServer(103): Processing 6a36843bf905,33811,1732375456985; numProcessing=1 2024-11-23T15:27:12,735 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/draining/6a36843bf905,33811,1732375456985 already deleted, retry=false 2024-11-23T15:27:12,735 INFO [RegionServerTracker-0 {}] master.ServerManager(652): Cluster shutdown set; 6a36843bf905,33811,1732375456985 expired; onlineServers=0 2024-11-23T15:27:12,735 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2561): ***** STOPPING region server '6a36843bf905,35555,1732375456260' ***** 2024-11-23T15:27:12,735 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2575): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-23T15:27:12,736 DEBUG [M:0;6a36843bf905:35555 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@41d66913, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=6a36843bf905/172.17.0.2:0 2024-11-23T15:27:12,736 INFO [M:0;6a36843bf905:35555 {}] regionserver.HRegionServer(1224): stopping server 6a36843bf905,35555,1732375456260 2024-11-23T15:27:12,736 INFO [M:0;6a36843bf905:35555 {}] regionserver.HRegionServer(1250): stopping server 6a36843bf905,35555,1732375456260; all regions closed. 2024-11-23T15:27:12,736 DEBUG [M:0;6a36843bf905:35555 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T15:27:12,736 DEBUG [M:0;6a36843bf905:35555 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-23T15:27:12,736 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 2024-11-23T15:27:12,736 DEBUG [M:0;6a36843bf905:35555 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-23T15:27:12,736 DEBUG [master/6a36843bf905:0:becomeActiveMaster-HFileCleaner.large.0-1732375458684 {}] cleaner.HFileCleaner(306): Exit Thread[master/6a36843bf905:0:becomeActiveMaster-HFileCleaner.large.0-1732375458684,5,FailOnTimeoutGroup] 2024-11-23T15:27:12,736 DEBUG [master/6a36843bf905:0:becomeActiveMaster-HFileCleaner.small.0-1732375458685 {}] cleaner.HFileCleaner(306): Exit Thread[master/6a36843bf905:0:becomeActiveMaster-HFileCleaner.small.0-1732375458685,5,FailOnTimeoutGroup] 2024-11-23T15:27:12,736 INFO [M:0;6a36843bf905:35555 {}] hbase.ChoreService(370): Chore service for: master/6a36843bf905:0 had [] on shutdown 2024-11-23T15:27:12,736 DEBUG [M:0;6a36843bf905:35555 {}] master.HMaster(1733): Stopping service threads 2024-11-23T15:27:12,736 INFO [M:0;6a36843bf905:35555 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-23T15:27:12,737 ERROR [M:0;6a36843bf905:35555 {}] procedure2.ProcedureExecutor(722): There are still active thread in group java.lang.ThreadGroup[name=PEWorkerGroup,maxpri=10], see STDOUT java.lang.ThreadGroup[name=PEWorkerGroup,maxpri=10] Thread[HFileArchiver-5,5,PEWorkerGroup] Thread[IPC Client (59733779) connection to localhost/127.0.0.1:40979 from jenkins,5,PEWorkerGroup] Thread[IPC Parameter Sending Thread for localhost/127.0.0.1:40979,5,PEWorkerGroup] Thread[HFileArchiver-6,5,PEWorkerGroup] 2024-11-23T15:27:12,737 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35555-0x10024fb5fcd0000, quorum=127.0.0.1:62881, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-23T15:27:12,737 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35555-0x10024fb5fcd0000, quorum=127.0.0.1:62881, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T15:27:12,737 INFO [M:0;6a36843bf905:35555 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-23T15:27:12,737 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 
2024-11-23T15:27:12,737 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:35555-0x10024fb5fcd0000, quorum=127.0.0.1:62881, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-23T15:27:12,738 DEBUG [M:0;6a36843bf905:35555 {}] zookeeper.ZKUtil(347): master:35555-0x10024fb5fcd0000, quorum=127.0.0.1:62881, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-23T15:27:12,738 WARN [M:0;6a36843bf905:35555 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-23T15:27:12,738 INFO [M:0;6a36843bf905:35555 {}] assignment.AssignmentManager(391): Stopping assignment manager 2024-11-23T15:27:12,738 INFO [M:0;6a36843bf905:35555 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-23T15:27:12,738 DEBUG [M:0;6a36843bf905:35555 {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-23T15:27:12,738 INFO [M:0;6a36843bf905:35555 {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-23T15:27:12,738 DEBUG [M:0;6a36843bf905:35555 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-23T15:27:12,738 DEBUG [M:0;6a36843bf905:35555 {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-23T15:27:12,738 DEBUG [M:0;6a36843bf905:35555 {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-23T15:27:12,738 INFO [M:0;6a36843bf905:35555 {}] regionserver.HRegion(2837): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=748.66 KB heapSize=918.88 KB 2024-11-23T15:27:12,753 DEBUG [M:0;6a36843bf905:35555 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/0565d26e8c0c4b3393960f4718eb2697 is 82, key is hbase:meta,,1/info:regioninfo/1732375460069/Put/seqid=0 2024-11-23T15:27:12,756 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742399_1575 (size=5672) 2024-11-23T15:27:12,834 INFO [RS:0;6a36843bf905:33811 {}] regionserver.HRegionServer(1307): Exiting; stopping=6a36843bf905,33811,1732375456985; zookeeper connection closed. 
2024-11-23T15:27:12,834 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33811-0x10024fb5fcd0001, quorum=127.0.0.1:62881, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-23T15:27:12,834 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33811-0x10024fb5fcd0001, quorum=127.0.0.1:62881, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-23T15:27:12,835 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@5249cc01 {}] hbase.MiniHBaseCluster$SingleFileSystemShutdownThread(216): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@5249cc01 2024-11-23T15:27:12,835 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-23T15:27:13,157 INFO [M:0;6a36843bf905:35555 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=2096 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/0565d26e8c0c4b3393960f4718eb2697 2024-11-23T15:27:13,177 DEBUG [M:0;6a36843bf905:35555 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/dc2a856ff8714d629ccf8ab1a4be82f8 is 2283, key is \x00\x00\x00\x00\x00\x00\x00*/proc:d/1732375490841/Put/seqid=0 2024-11-23T15:27:13,181 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742400_1576 (size=43996) 2024-11-23T15:27:13,581 INFO [M:0;6a36843bf905:35555 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=748.10 KB at sequenceid=2096 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/dc2a856ff8714d629ccf8ab1a4be82f8 2024-11-23T15:27:13,585 INFO [M:0;6a36843bf905:35555 {}] regionserver.StoreFileReader(539): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for dc2a856ff8714d629ccf8ab1a4be82f8 2024-11-23T15:27:13,599 DEBUG [M:0;6a36843bf905:35555 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/6363edeb812849d1b40cc895d10bcaf3 is 69, key is 6a36843bf905,33811,1732375456985/rs:state/1732375458727/Put/seqid=0 2024-11-23T15:27:13,602 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073742401_1577 (size=5156) 2024-11-23T15:27:14,003 INFO [M:0;6a36843bf905:35555 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=2096 (bloomFilter=true), to=hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/6363edeb812849d1b40cc895d10bcaf3 2024-11-23T15:27:14,007 DEBUG [M:0;6a36843bf905:35555 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/0565d26e8c0c4b3393960f4718eb2697 as 
hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/0565d26e8c0c4b3393960f4718eb2697 2024-11-23T15:27:14,009 INFO [M:0;6a36843bf905:35555 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/0565d26e8c0c4b3393960f4718eb2697, entries=8, sequenceid=2096, filesize=5.5 K 2024-11-23T15:27:14,010 DEBUG [M:0;6a36843bf905:35555 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/dc2a856ff8714d629ccf8ab1a4be82f8 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/dc2a856ff8714d629ccf8ab1a4be82f8 2024-11-23T15:27:14,012 INFO [M:0;6a36843bf905:35555 {}] regionserver.StoreFileReader(539): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for dc2a856ff8714d629ccf8ab1a4be82f8 2024-11-23T15:27:14,012 INFO [M:0;6a36843bf905:35555 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/dc2a856ff8714d629ccf8ab1a4be82f8, entries=173, sequenceid=2096, filesize=43.0 K 2024-11-23T15:27:14,013 DEBUG [M:0;6a36843bf905:35555 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/6363edeb812849d1b40cc895d10bcaf3 as hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/6363edeb812849d1b40cc895d10bcaf3 2024-11-23T15:27:14,015 INFO [M:0;6a36843bf905:35555 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40979/user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/6363edeb812849d1b40cc895d10bcaf3, entries=1, sequenceid=2096, filesize=5.0 K 2024-11-23T15:27:14,016 INFO [M:0;6a36843bf905:35555 {}] regionserver.HRegion(3040): Finished flush of dataSize ~748.66 KB/766627, heapSize ~918.59 KB/940632, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 1277ms, sequenceid=2096, compaction requested=false 2024-11-23T15:27:14,017 INFO [M:0;6a36843bf905:35555 {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-23T15:27:14,017 DEBUG [M:0;6a36843bf905:35555 {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682: 2024-11-23T15:27:14,019 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33485 is added to blk_1073741830_1006 (size=904082) 2024-11-23T15:27:14,019 WARN [Close-WAL-Writer-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(743): complete file /user/jenkins/test-data/3cc5bea5-0be2-f7bd-693f-2c46d8306704/MasterData/WALs/6a36843bf905,35555,1732375456260/6a36843bf905%2C35555%2C1732375456260.1732375458172 not finished, retry = 0 2024-11-23T15:27:14,120 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(243): LogRoller exiting. 
2024-11-23T15:27:14,120 INFO [M:0;6a36843bf905:35555 {}] flush.MasterFlushTableProcedureManager(91): stop: server shutting down. 2024-11-23T15:27:14,120 INFO [M:0;6a36843bf905:35555 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:35555 2024-11-23T15:27:14,122 DEBUG [M:0;6a36843bf905:35555 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/rs/6a36843bf905,35555,1732375456260 already deleted, retry=false 2024-11-23T15:27:14,224 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35555-0x10024fb5fcd0000, quorum=127.0.0.1:62881, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-23T15:27:14,224 INFO [M:0;6a36843bf905:35555 {}] regionserver.HRegionServer(1307): Exiting; stopping=6a36843bf905,35555,1732375456260; zookeeper connection closed. 2024-11-23T15:27:14,224 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35555-0x10024fb5fcd0000, quorum=127.0.0.1:62881, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-23T15:27:14,229 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1f79ec76{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-23T15:27:14,231 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@576ebda6{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-23T15:27:14,231 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-23T15:27:14,231 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4727fac8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-23T15:27:14,231 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@47db50b9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/45616010-fcc1-6076-40c6-648ed4f2daa4/hadoop.log.dir/,STOPPED} 2024-11-23T15:27:14,234 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-23T15:27:14,234 WARN [BP-1918544696-172.17.0.2-1732375453586 heartbeating to localhost/127.0.0.1:40979 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-23T15:27:14,234 WARN [BP-1918544696-172.17.0.2-1732375453586 heartbeating to localhost/127.0.0.1:40979 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1918544696-172.17.0.2-1732375453586 (Datanode Uuid fa81f3d8-5d4c-4f82-8f34-6d25fc680d96) service to localhost/127.0.0.1:40979 2024-11-23T15:27:14,234 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-23T15:27:14,236 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/45616010-fcc1-6076-40c6-648ed4f2daa4/cluster_ab4327e8-be6a-a24d-51a5-85c3c7bb54b1/dfs/data/data1/current/BP-1918544696-172.17.0.2-1732375453586 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-23T15:27:14,236 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/45616010-fcc1-6076-40c6-648ed4f2daa4/cluster_ab4327e8-be6a-a24d-51a5-85c3c7bb54b1/dfs/data/data2/current/BP-1918544696-172.17.0.2-1732375453586 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-23T15:27:14,237 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-23T15:27:14,245 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@b03fcff{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-23T15:27:14,246 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@e0a3ea0{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-23T15:27:14,246 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-23T15:27:14,246 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@62a9beb2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-23T15:27:14,246 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@8167a4c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/45616010-fcc1-6076-40c6-648ed4f2daa4/hadoop.log.dir/,STOPPED} 2024-11-23T15:27:14,264 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(345): Shutdown MiniZK cluster with all ZK servers 2024-11-23T15:27:14,390 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1347): Minicluster is down